Compare commits

Comparing `2025-05-en` ... `2024-12-mq` (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | c223891e15 |  |
@@ -1,26 +0,0 @@
{
  "name": "container.training environment to get started with Docker and/or Kubernetes",
  "image": "ghcr.io/jpetazzo/shpod",
  "features": {
    //"ghcr.io/devcontainers/features/common-utils:2": {}
  },

  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  "forwardPorts": [],

  //"postCreateCommand": "... install extra packages...",
  "postStartCommand": "dind.sh",

  // This lets us use "docker-outside-docker".
  // Unfortunately, minikube, kind, etc. don't work very well that way;
  // so for now, we'll likely use "docker-in-docker" instead (with a
  // privileged container). But we're still exposing that socket in case
  // someone wants to do something interesting with it.
  "mounts": ["source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"],

  // This is for docker-in-docker.
  "privileged": true,

  // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
  "remoteUser": "k8s"
}
.gitignore (vendored): 1 line changed
@@ -9,7 +9,6 @@ prepare-labs/terraform/many-kubernetes/one-kubernetes-config/config.tf
prepare-labs/terraform/many-kubernetes/one-kubernetes-module/*.tf
prepare-labs/terraform/tags
prepare-labs/terraform/virtual-machines/openstack/*.tfvars
prepare-labs/terraform/virtual-machines/proxmox/*.tfvars
prepare-labs/www

slides/*.yml.html

@@ -1,5 +1,5 @@
FROM node:4-slim
RUN npm install express@4
RUN npm install express
RUN npm install redis@3
COPY files/ /files/
COPY webui.js /

@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  use-forwarded-headers: "true"
  compute-full-forwarded-for: "true"
  use-proxy-protocol: "true"
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: ingress-nginx
@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- M6-ingress-nginx-components.yaml
- sync.yaml
patches:
- path: M6-ingress-nginx-cm-patch.yaml
  target:
    kind: ConfigMap
- path: M6-ingress-nginx-svc-patch.yaml
  target:
    kind: Service
@@ -1,8 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  annotations:
    service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "true"
    service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: kyverno
@@ -1,72 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: flux-multi-tenancy
spec:
  validationFailureAction: enforce
  rules:
    - name: serviceAccountName
      exclude:
        resources:
          namespaces:
            - flux-system
      match:
        resources:
          kinds:
            - Kustomization
            - HelmRelease
      validate:
        message: ".spec.serviceAccountName is required"
        pattern:
          spec:
            serviceAccountName: "?*"
    - name: kustomizationSourceRefNamespace
      exclude:
        resources:
          namespaces:
            - flux-system
            - ingress-nginx
            - kyverno
            - monitoring
            - openebs
      match:
        resources:
          kinds:
            - Kustomization
      preconditions:
        any:
          - key: "{{request.object.spec.sourceRef.namespace}}"
            operator: NotEquals
            value: ""
      validate:
        message: "spec.sourceRef.namespace must be the same as metadata.namespace"
        deny:
          conditions:
            - key: "{{request.object.spec.sourceRef.namespace}}"
              operator: NotEquals
              value: "{{request.object.metadata.namespace}}"
    - name: helmReleaseSourceRefNamespace
      exclude:
        resources:
          namespaces:
            - flux-system
            - ingress-nginx
            - kyverno
            - monitoring
            - openebs
      match:
        resources:
          kinds:
            - HelmRelease
      preconditions:
        any:
          - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
            operator: NotEquals
            value: ""
      validate:
        message: "spec.chart.spec.sourceRef.namespace must be the same as metadata.namespace"
        deny:
          conditions:
            - key: "{{request.object.spec.chart.spec.sourceRef.namespace}}"
              operator: NotEquals
              value: "{{request.object.metadata.namespace}}"
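Note: the policy above requires every tenant `Kustomization`/`HelmRelease` to set `spec.serviceAccountName` and to keep its `sourceRef` in its own namespace. As a minimal sketch (the `tenant-app` and `tenant-repo` names are hypothetical; the `rocky-test` namespace and `rocky` ServiceAccount come from the tenant manifests further down), a compliant Flux Kustomization would look like:

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: tenant-app            # hypothetical name
  namespace: rocky-test
spec:
  interval: 5m
  path: ./k8s/plain
  serviceAccountName: rocky   # required by the serviceAccountName rule
  sourceRef:
    kind: GitRepository
    name: tenant-repo         # hypothetical source
    namespace: rocky-test     # must equal metadata.namespace, or the deny rule fires
```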
@@ -1,29 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: monitoring
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: monitoring
spec:
  ingressClassName: nginx
  rules:
  - host: grafana.test.metal.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kube-prometheus-stack-grafana
            port:
              number: 80
@@ -1,35 +0,0 @@
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector: {}
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      app: web
  ingress:
  - from: []
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-db
spec:
  podSelector:
    matchLabels:
      app: db
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: web
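Note: these three policies assume pods labeled `app: web` and `app: db`. A minimal sketch of such pods (the images are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web
  labels:
    app: web          # allow-webui: accepts ingress from anywhere
spec:
  containers:
  - name: web
    image: nginx      # hypothetical image
---
apiVersion: v1
kind: Pod
metadata:
  name: db
  labels:
    app: db           # allow-db: accepts ingress only from app=web pods
spec:
  containers:
  - name: db
    image: redis      # hypothetical image
```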
@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: flux-system
    app.kubernetes.io/part-of: flux
    app.kubernetes.io/version: v2.5.1
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest
  name: openebs
@@ -1,12 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
  files:
  - values.yaml=M6-openebs-values.yaml
configurations:
- M6-openebs-kustomizeconfig.yaml
@@ -1,6 +0,0 @@
nameReference:
- kind: ConfigMap
  version: v1
  fieldSpecs:
  - path: spec/valuesFrom/name
    kind: HelmRelease
@@ -1,15 +0,0 @@
# helm install openebs --namespace openebs openebs/openebs
#   --set engines.replicated.mayastor.enabled=false
#   --set lvm-localpv.lvmNode.kubeletDir=/var/lib/k0s/kubelet/
#   --create-namespace
engines:
  replicated:
    mayastor:
      enabled: false
# Needed for k0s install since kubelet install is slightly divergent from vanilla install >:-(
lvm-localpv:
  lvmNode:
    kubeletDir: /var/lib/k0s/kubelet/
localprovisioner:
  hostpathClass:
    isDefaultClass: true
@@ -1,38 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  namespace: rocky-test
  name: rocky-full-access
rules:
- apiGroups: ["", extensions, apps]
  resources: [deployments, replicasets, pods, services, ingresses, statefulsets]
  verbs: [get, list, watch, create, update, patch, delete] # You can also use [*]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rocky-pv-access
rules:
- apiGroups: [""]
  resources: [persistentvolumes]
  verbs: [get, list, watch, create, patch]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    toolkit.fluxcd.io/tenant: rocky
  name: rocky-reconciler2
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rocky-pv-access
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: gotk:rocky-test:reconciler
- kind: ServiceAccount
  name: rocky
  namespace: rocky-test

@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rocky
  namespace: rocky-test
spec:
  ingressClassName: nginx
  rules:
  - host: rocky.test.mybestdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web
            port:
              number: 80

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/rocky
patches:
- path: M6-rocky-test-patch.yaml
  target:
    kind: Kustomization
@@ -1,7 +0,0 @@
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  path: ./k8s/plain
@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
  name: pod-color-policy-1
spec:
  validationFailureAction: enforce
  rules:
  - name: ensure-pod-color-is-valid
    match:
@@ -17,6 +18,5 @@ spec:
        operator: NotIn
        values: [ red, green, blue ]
    validate:
      failureAction: Enforce
      message: "If it exists, the label color must be red, green, or blue."
      deny: {}

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
  name: pod-color-policy-2
spec:
  validationFailureAction: enforce
  background: false
  rules:
  - name: prevent-color-change
@@ -21,7 +22,6 @@ spec:
        operator: NotEquals
        value: ""
    validate:
      failureAction: Enforce
      message: "Once label color has been added, it cannot be changed."
      deny:
        conditions:

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
  name: pod-color-policy-3
spec:
  validationFailureAction: enforce
  background: false
  rules:
  - name: prevent-color-change
@@ -21,6 +22,7 @@ spec:
        operator: Equals
        value: ""
    validate:
      failureAction: Enforce
      message: "Once label color has been added, it cannot be removed."
      deny: {}
      deny:
        conditions:

@@ -6,44 +6,33 @@
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb

set -e

PROVIDER=scaleway
STUDENTS=30

case "$PROVIDER" in
linode)
  export TF_VAR_node_size=g6-standard-6
  export TF_VAR_location=us-east
  export TF_VAR_location=eu-west
  ;;
scaleway)
  export TF_VAR_node_size=PRO2-XS
  # For tiny testing purposes, these are okay too:
  #export TF_VAR_node_size=PLAY2-NANO
  export TF_VAR_location=fr-par-2
  ;;
esac

./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag konk

# set kubeconfig file
export KUBECONFIG=~/kubeconfig

if [ "$PROVIDER" = "kind" ]; then
  kind create cluster --name konk
  ADDRTYPE=InternalIP
else
  ./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag konk
  cp tags/konk/stage2/kubeconfig.101 $KUBECONFIG
  ADDRTYPE=ExternalIP
fi
cp tags/konk/stage2/kubeconfig.101 $KUBECONFIG

# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="'$ADDRTYPE'")].address}{"\n"}{end}' |
while read node address ignoredaddresses; do
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}' |
while read node address; do
  kubectl label node $node external_ip=$address
done

# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students $STUDENTS
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students 50

# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \

@@ -49,41 +49,6 @@ _cmd_clean() {
  done
}

_cmd codeserver "Install code-server on the clusters"
_cmd_codeserver() {
  TAG=$1
  need_tag

  ARCH=${ARCHITECTURE-amd64}
  CODESERVER_VERSION=4.96.4
  CODESERVER_URL=https://github.com/coder/code-server/releases/download/v${CODESERVER_VERSION}/code-server-${CODESERVER_VERSION}-linux-${ARCH}.tar.gz
  pssh "
  set -e
  i_am_first_node || exit 0
  if ! [ -x /usr/local/bin/code-server ]; then
    curl -fsSL $CODESERVER_URL | sudo tar zx -C /opt
    sudo ln -s /opt/code-server-${CODESERVER_VERSION}-linux-${ARCH}/bin/code-server /usr/local/bin/code-server
    sudo -u $USER_LOGIN -H code-server --install-extension ms-azuretools.vscode-docker
    sudo -u $USER_LOGIN -H code-server --install-extension ms-kubernetes-tools.vscode-kubernetes-tools
    sudo -u $USER_LOGIN -H mkdir -p /home/$USER_LOGIN/.local/share/code-server/User
    echo '{\"workbench.startupEditor\": \"terminal\"}' | sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.local/share/code-server/User/settings.json
    sudo -u $USER_LOGIN mkdir -p /home/$USER_LOGIN/.config/systemd/user
    sudo -u $USER_LOGIN tee /home/$USER_LOGIN/.config/systemd/user/code-server.service <<EOF
[Unit]
Description=code-server

[Install]
WantedBy=default.target

[Service]
ExecStart=/usr/local/bin/code-server --bind-addr [::]:1789
Restart=always
EOF
    sudo systemctl --user -M $USER_LOGIN@ enable code-server.service --now
    sudo loginctl enable-linger $USER_LOGIN
  fi"
}

_cmd createuser "Create the user that students will use"
_cmd_createuser() {
  TAG=$1
@@ -297,9 +262,20 @@ _cmd_create() {
  if [ "$CLUSTERSIZE" ]; then
    echo nodes_per_cluster = $CLUSTERSIZE >> terraform.tfvars
  fi
  for RETRY in 1 2 3; do
    if terraform apply -auto-approve; then
      touch terraform.ok
      break
    fi
  done
  if ! [ -f terraform.ok ]; then
    die "Terraform failed."
  fi
  )

  sep
  info "Successfully created $COUNT instances with tag $TAG"
  echo create_ok > tags/$TAG/status

  # If the settings.env file has a "STEPS" field,
  # automatically execute all the actions listed in that field.
@@ -374,13 +350,9 @@ _cmd_clusterize() {
  done < /tmp/cluster
  "

  jq --raw-input --compact-output \
    --arg USER_LOGIN "$USER_LOGIN" --arg USER_PASSWORD "$USER_PASSWORD" '
    {
      "login": $USER_LOGIN,
      "password": $USER_PASSWORD,
      "ipaddrs": .
    }' < tags/$TAG/clusters.tsv > tags/$TAG/logins.jsonl
  while read line; do
    printf '{"login": "%s", "password": "%s", "ipaddrs": "%s"}\n' "$USER_LOGIN" "$USER_PASSWORD" "$line"
  done < tags/$TAG/clusters.tsv > tags/$TAG/logins.jsonl

  echo cluster_ok > tags/$TAG/status
}
@@ -620,9 +592,7 @@ EOF
  # Install weave as the pod network
  pssh "
  if i_am_first_node; then
    curl -fsSL https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml |
    sed s,weaveworks/weave,quay.io/rackspace/weave, |
    kubectl apply -f-
    kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s-1.11.yaml
  fi"

  # FIXME this is a gross hack to add the deployment key to our SSH agent,
@@ -978,7 +948,7 @@ _cmd_logins() {
  need_tag $TAG

  cat tags/$TAG/logins.jsonl \
  | jq -r '"\(if .codeServerPort then "\(.codeServerPort)\t" else "" end )\(.password)\tssh -l \(.login)\(if .port then " -p \(.port)" else "" end)\t\(.ipaddrs)"'
  | jq -r '"\(.password)\tssh -l \(.login)\(if .port then " -p \(.port)" else "" end)\t\(.ipaddrs)"'
}

_cmd maketag "Generate a quasi-unique tag for a group of instances"
@@ -1120,7 +1090,7 @@ _cmd_tailhist () {
  set -e
  sudo apt-get install unzip -y
  wget -c https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
  unzip -o websocketd-0.3.0-linux_$ARCH.zip websocketd
  unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
  sudo mv websocketd /usr/local/bin/websocketd
  sudo mkdir -p /opt/tailhist
  sudo tee /opt/tailhist.service <<EOF
@@ -1143,35 +1113,14 @@ EOF
  pssh -I sudo tee /opt/tailhist/index.html <lib/tailhist.html
}

_cmd terraform "Apply Terraform configuration to provision resources."
_cmd_terraform() {
  TAG=$1
  need_tag
  echo terraforming > tags/$TAG/status
  (
  cd tags/$TAG
  terraform apply -auto-approve
  # The Terraform provider for Proxmox has a bug; sometimes it fails
  # to obtain VM address from the QEMU agent. In that case, we put
  # ERROR in the ips.txt file (instead of the VM IP address). Detect
  # that so that we run Terraform again (this typically solves the issue).
  if grep -q ERROR ips.txt; then
    die "Couldn't obtain IP address of some machines. Try to re-run terraform."
  fi
  )
  echo terraformed > tags/$TAG/status

}

_cmd tools "Install a bunch of useful tools (editors, git, jq...)"
_cmd_tools() {
  TAG=$1
  need_tag

  pssh "
  set -e
  sudo apt-get -q update
  sudo apt-get -qy install apache2-utils argon2 emacs-nox git httping htop jid joe jq mosh tree unzip
  sudo apt-get -qy install apache2-utils emacs-nox git httping htop jid joe jq mosh python-setuptools tree unzip
  # This is for VMs with broken PRNG (symptom: running docker-compose randomly hangs)
  sudo apt-get -qy install haveged
  "
@@ -1311,13 +1260,7 @@ _cmd_passwords() {
  $0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
    info "Setting password for $nodes..."
    for node in $nodes; do
      echo $USER_LOGIN $password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node '
        read login password
        echo $login:$password | sudo chpasswd
        hashedpassword=$(echo -n $password | argon2 saltysalt$RANDOM -e)
        sudo -u $login mkdir -p /home/$login/.config/code-server
        echo "hashed-password: \"$hashedpassword\"" | sudo -u $login tee /home/$login/.config/code-server/config.yaml >/dev/null
      '
      echo $USER_LOGIN:$password | ssh $SSHOPTS -i tags/$TAG/id_rsa ubuntu@$node sudo chpasswd
    done
  done
  info "Done."
@@ -1349,11 +1292,6 @@ _cmd_wait() {
  pssh -l $SSH_USER "
  if [ -d /var/lib/cloud ]; then
    cloud-init status --wait
    case $? in
      0) exit 0;; # all is good
      2) exit 0;; # recoverable error (happens with proxmox deprecated cloud-init payloads)
      *) exit 1;; # all other problems
    esac
  fi"
}

@@ -1396,7 +1334,7 @@ WantedBy=multi-user.target

[Service]
WorkingDirectory=/opt/webssh
ExecStart=/usr/bin/env python3 run.py --fbidhttp=false --port=1080 --policy=reject
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always

@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -11,7 +11,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -10,7 +10,6 @@ USER_PASSWORD=training
KUBEVERSION=1.28.9

STEPS="
terraform
wait
standardize
clusterize

@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -6,7 +6,6 @@ USER_LOGIN=docker
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -15,5 +14,6 @@ STEPS="
createuser
webssh
tailhist
cards
ips
"
"

@@ -3,4 +3,4 @@ CLUSTERSIZE=5
USER_LOGIN=k8s
USER_PASSWORD=

STEPS="terraform stage2"
STEPS="stage2"

@@ -6,7 +6,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -7,7 +7,6 @@ USER_LOGIN=k8s
USER_PASSWORD=training

STEPS="
terraform
wait
standardize
clusterize

@@ -1,4 +1,4 @@
USER_LOGIN=k8s
USER_PASSWORD=

STEPS="terraform stage2"
STEPS="stage2"

@@ -1,4 +1,4 @@
#export TF_VAR_node_size=GP4.4
#export TF_VAR_node_size=GP2.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge

@@ -11,7 +11,6 @@ USER_LOGIN=portal
USER_PASSWORD=CHANGEME

STEPS="
terraform
wait
standardize
clusterize

@@ -69,7 +69,7 @@ body {
body {
  width: 6.75in; /* two cards wide */
  margin-left: 0.875in; /* (8.5in - 6.75in)/2 */
  margin-top: 0.1875in; /* (11in - 5 cards)/2 */
  margin-top: 0; /* NOTE: we have to manually specify a top margin of e.g. 0.1875in when printing */
}
{% endif %}

@@ -8,12 +8,11 @@ backside: |
  Thanks for attending the Asynchronous Architecture Patterns workshop at QCON!
  </p>
  <p>
  <b>This QR code will give you my contact info</b> as well as a link to a feedback form.
  If you'd like me to send you a copy of the recording of the workshop
  and of the training materials,
  please scan that QR code to leave me your
  contact information. Thank you!
  </p>
  <p>
  If you liked this workshop, I can train your team, in person or online, with custom
  courses of any length and any level, on Docker, Kubernetes, and MLops.
  </p>
qrcode: https://2024-11-qconsf.container.training/#contact
qrcode: https://2024-11-qconsf.container.training/q
thing: Kubernetes cluster
image: logo-kubernetes.png
image: logo-bento.svg

@@ -14,20 +14,6 @@ provider "kubernetes" {
  config_path = "./kubeconfig.${index}"
}

provider "helm" {
  alias = "cluster_${index}"
  kubernetes {
    config_path = "./kubeconfig.${index}"
  }
}

# Password used for SSH and code-server access
resource "random_string" "shpod_${index}" {
  length  = 6
  special = false
  upper   = false
}

resource "kubernetes_namespace" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
@@ -35,57 +21,120 @@ resource "kubernetes_namespace" "shpod_${index}" {
  }
}

data "kubernetes_service" "shpod_${index}" {
  depends_on = [ helm_release.shpod_${index} ]
resource "kubernetes_deployment" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod"
    namespace = "shpod"
    namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
  }
  spec {
    selector {
      match_labels = {
        app = "shpod"
      }
    }
    template {
      metadata {
        labels = {
          app = "shpod"
        }
      }
      spec {
        service_account_name = "shpod"
        container {
          image = "jpetazzo/shpod"
          name  = "shpod"
          env {
            name  = "PASSWORD"
            value = random_string.shpod_${index}.result
          }
          lifecycle {
            post_start {
              exec {
                command = [ "sh", "-c", "curl http://myip.enix.org/REMOTE_ADDR > /etc/HOSTIP || true" ]
              }
            }
          }
          resources {
            limits = {
              cpu    = "2"
              memory = "500M"
            }
            requests = {
              cpu    = "100m"
              memory = "250M"
            }
          }
        }
      }
    }
  }
}

resource "helm_release" "shpod_${index}" {
  provider   = helm.cluster_${index}
  repository = "https://shpod.in"
  chart      = "shpod"
  name       = "shpod"
  namespace  = "shpod"
  create_namespace = false
  set {
    name  = "service.type"
    value = "NodePort"
resource "kubernetes_service" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  lifecycle {
    # Folks might alter their shpod Service to expose extra ports.
    # Don't reset their changes.
    ignore_changes = [ spec ]
  }
  set {
    name  = "resources.requests.cpu"
    value = "100m"
  metadata {
    name      = "shpod"
    namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
  }
  set {
    name  = "resources.requests.memory"
    value = "500M"
  spec {
    selector = {
      app = "shpod"
    }
    port {
      name        = "ssh"
      port        = 22
      target_port = 22
    }
    type = "NodePort"
  }
  set {
    name  = "resources.limits.cpu"
    value = "1"
  }

resource "kubernetes_service_account" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name      = "shpod"
    namespace = kubernetes_namespace.shpod_${index}.metadata.0.name
  }
  set {
    name  = "resources.limits.memory"
    value = "1000M"
  }

resource "kubernetes_cluster_role_binding" "shpod_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod"
  }
  set {
    name  = "persistentVolume.enabled"
    value = "true"
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }
  set {
    name  = "ssh.password"
    value = random_string.shpod_${index}.result
  subject {
    kind      = "ServiceAccount"
    name      = "shpod"
    namespace = "shpod"
  }
  set {
    name  = "rbac.cluster.clusterRoles"
    value = "{cluster-admin}"
  subject {
    api_group = "rbac.authorization.k8s.io"
    kind      = "Group"
    name      = "shpod-cluster-admins"
  }
  set {
    name  = "codeServer.enabled"
    value = "true"
  }

resource "random_string" "shpod_${index}" {
  length  = 6
  special = false
  upper   = false
}

provider "helm" {
  alias = "cluster_${index}"
  kubernetes {
    config_path = "./kubeconfig.${index}"
  }
}

@@ -107,36 +156,6 @@ resource "helm_release" "metrics_server_${index}" {
  }
}

# This section here deserves a little explanation.
#
# When we access a cluster with shpod (either through SSH or code-server)
# there is no kubeconfig file - we simply use "in-cluster" authentication
# with a ServiceAccount token. This is a bit unusual, and ideally, I would
# prefer to have a "normal" kubeconfig file in the students' shell.
#
# So what we're doing here, is that we're populating a ConfigMap with
# a kubeconfig file; and in the initialization scripts (e.g. bashrc) we
# automatically download the kubeconfig file from the ConfigMap and place
# it in ~/.kube/kubeconfig.
#
# But, which kubeconfig file should we use? We could use the "normal"
# kubeconfig file that was generated by the provider; but in some cases,
# that kubeconfig file might use a token instead of a certificate for
# user authentication - and ideally, I would like to have a certificate
# so that in the section about auth and RBAC, we can dissect that TLS
# certificate and explain where our permissions come from.
#
# So we're creating a TLS key pair; using the CSR API to issue a user
# certificate belonging to a special group; granting the cluster-admin
# role to that group; then we use the kubeconfig file generated by the
# provider but override the user with that TLS key pair.
#
# This is not strictly necessary but it streamlines the lesson on auth.
#
# Lastly - in the ConfigMap we actually put both the original kubeconfig,
# and the one where we injected our new user (just in case we want to
# use or look at the original for any reason).

resource "kubernetes_config_map" "kubeconfig_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
@@ -183,23 +202,6 @@ resource "tls_cert_request" "cluster_admin_${index}" {
  }
}

resource "kubernetes_cluster_role_binding" "shpod_cluster_admin_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
    name = "shpod-cluster-admin"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }
  subject {
    api_group = "rbac.authorization.k8s.io"
    kind      = "Group"
    name      = "shpod-cluster-admins"
  }
}

resource "kubernetes_certificate_signing_request_v1" "cluster_admin_${index}" {
  provider = kubernetes.cluster_${index}
  metadata {
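Note: the `kubernetes_certificate_signing_request_v1` resource above drives the same CSR API one would use by hand. A hedged sketch of the equivalent manifest (the name and placeholder request are hypothetical; the CSR subject would carry the user as CN and the `shpod-cluster-admins` group as O, matching the comment block above):

```yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: shpod-cluster-admin                 # hypothetical name
spec:
  request: <base64-encoded PKCS#10 CSR>     # placeholder
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
```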
@@ -232,8 +234,7 @@ output "logins_jsonl" {
    jsonencode({
      login = "k8s",
      password = random_string.shpod_${index}.result,
      port = data.kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
      codeServerPort = data.kubernetes_service.shpod_${index}.spec[0].port[1].node_port,
      port = kubernetes_service.shpod_${index}.spec[0].port[0].node_port,
      ipaddrs = replace(file("./externalips.${index}"), " ", "\t"),
    }),
%{ endfor ~}

@@ -1,30 +0,0 @@
variable "proxmox_endpoint" {
  type    = string
  default = "https://localhost:8006/"
}

variable "proxmox_username" {
  type    = string
  default = null
}

variable "proxmox_password" {
  type    = string
  default = null
}

variable "proxmox_storage" {
  type    = string
  default = "local"
}

variable "proxmox_template_node_name" {
  type    = string
  default = null
}

variable "proxmox_template_vm_id" {
  type    = number
  default = null
}

@@ -1,11 +0,0 @@
# Since node size needs to be a string...
# To indicate number of CPUs + RAM, just pass it as a string with a space between them.
# RAM is in megabytes.
variable "node_sizes" {
  type = map(any)
  default = {
    S = "1 2048"
    M = "2 4096"
    L = "3 8192"
  }
}
@@ -56,7 +56,6 @@ locals {
      cluster_name = format("%s-%03d", var.tag, cn[0])
      node_name    = format("%s-%03d-%03d", var.tag, cn[0], cn[1])
      node_size    = lookup(var.node_sizes, var.node_size, var.node_size)
      node_index   = cn[0] * var.nodes_per_cluster + cn[1]
    }
  }
}

@@ -1 +0,0 @@
../common.tf
@@ -1 +0,0 @@
../../providers/proxmox/config.tf
@@ -1,79 +0,0 @@
data "proxmox_virtual_environment_nodes" "_" {}

locals {
  pve_nodes = data.proxmox_virtual_environment_nodes._.names
}

resource "proxmox_virtual_environment_vm" "_" {
  node_name = local.pve_nodes[each.value.node_index % length(local.pve_nodes)]
  for_each = local.nodes
  name = each.value.node_name
  tags = ["container.training", var.tag]
  stop_on_destroy = true
  cpu {
    cores = split(" ", each.value.node_size)[0]
    type  = "x86-64-v2-AES" # recommended for modern CPUs
  }
  memory {
    dedicated = split(" ", each.value.node_size)[1]
  }
  #disk {
  #  datastore_id = var.proxmox_storage
  #  file_id      = proxmox_virtual_environment_file._.id
  #  interface    = "scsi0"
  #  size         = 30
  #  discard      = "on"
  #}
  clone {
    vm_id     = var.proxmox_template_vm_id
    node_name = var.proxmox_template_node_name
    full      = false
  }
  agent {
    enabled = true
  }
  initialization {
    datastore_id = var.proxmox_storage
    user_account {
      username = "ubuntu"
      keys     = [trimspace(tls_private_key.ssh.public_key_openssh)]
    }
    ip_config {
      ipv4 {
        address = "dhcp"
        #gateway =
      }
    }
  }
  network_device {
    bridge = "vmbr0"
  }
  operating_system {
    type = "l26"
  }
}

#resource "proxmox_virtual_environment_download_file" "ubuntu_2404_20250115" {
#  content_type = "iso"
#  datastore_id = "cephfs"
#  node_name    = "pve-lsd-1"
#  url          = "https://cloud-images.ubuntu.com/releases/24.04/release-20250115/ubuntu-24.04-server-cloudimg-amd64.img"
#  file_name    = "ubuntu_2404_20250115.img"
#}
#
#resource "proxmox_virtual_environment_file" "_" {
#  datastore_id = "cephfs"
#  node_name    = "pve-lsd-1"
#  source_file {
#    path = "/root/noble-server-cloudimg-amd64.img"
#  }
#}

locals {
  ip_addresses = {
    for key, value in local.nodes :
    key => [for addr in flatten(concat(proxmox_virtual_environment_vm._[key].ipv4_addresses, ["ERROR"])) :
    addr if addr != "127.0.0.1"][0]
  }
}

@@ -1,15 +0,0 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "~> 0.70.1"
    }
  }
}

provider "proxmox" {
  endpoint = var.proxmox_endpoint
  username = var.proxmox_username
  password = var.proxmox_password
  insecure = true
}
@@ -1,17 +0,0 @@
# If you want to deploy to Proxmox, you need to:
# 1) copy that file to e.g. myproxmoxcluster.tfvars
# 2) make sure you have a VM template with QEMU agent pre-installed
# 3) customize the copy (you need to replace all the CHANGEME values)
# 4) deploy with "labctl create --provider proxmox/myproxmoxcluster ..."

proxmox_endpoint = "https://localhost:8006/"
proxmox_username = "terraform@pve"
proxmox_password = "CHANGEME"

# Which storage to use for VM disks. Defaults to "local".
#proxmox_storage = "ceph"

proxmox_template_node_name = "CHANGEME"
proxmox_template_vm_id = CHANGEME

@@ -1 +0,0 @@
../../providers/proxmox/variables.tf
slides/1.yml: 68 lines changed
@@ -1,68 +0,0 @@
title: |
  Docker Intensif

chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2025-05-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
  #- containers/Docker_Overview.md
  #- containers/Docker_History.md
  - containers/Training_Environment.md
  #- containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  - containers/Initial_Images.md
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
  - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
- # DAY 2
  - containers/Container_Networking_Basics.md
  - containers/Local_Development_Workflow.md
  - containers/Container_Network_Model.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
- # DAY 3
  - containers/Start_And_Attach.md
  - containers/Naming_And_Inspecting.md
  - containers/Labels.md
  - containers/Getting_Inside.md
  - containers/Dockerfile_Tips.md
  - containers/Advanced_Dockerfiles.md
  - containers/Multi_Stage_Builds.md
  - containers/Publishing_To_Docker_Hub.md
  - containers/Exercise_Dockerfile_Advanced.md
- # DAY 4
  - containers/Buildkit.md
  - containers/Network_Drivers.md
  - containers/Namespaces_Cgroups.md
  #- containers/Copy_On_Write.md
  - containers/Orchestration_Overview.md
  #- containers/Docker_Machine.md
  #- containers/Init_Systems.md
  #- containers/Application_Configuration.md
  #- containers/Logging.md
  #- containers/Containers_From_Scratch.md
  #- containers/Container_Engines.md
  #- containers/Pods_Anatomy.md
  #- containers/Ecosystem.md
  - shared/thankyou.md
  #- containers/links.md
slides/2.yml: 92 lines changed
@@ -1,92 +0,0 @@
title: |
  Fondamentaux Kubernetes

chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2025-05-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/prereqs.md
- shared/handson.md
#- shared/webssh.md
- shared/connecting.md
- exercises/k8sfundamentals-brief.md
- exercises/yaml-dockercoins-brief.md
- exercises/localcluster-brief.md
- exercises/healthchecks-brief.md
- shared/toc.md
- # 1
  #- k8s/versions-k8s.md
  - shared/sampleapp.md
  #- shared/composescale.md
  #- shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - k8s/kubectlget.md
  - k8s/kubectl-run.md
  - k8s/kubectlexpose.md
  - k8s/service-types.md
  - k8s/kubenet.md
  - k8s/shippingimages.md
  #- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - exercises/k8sfundamentals-details.md
  - k8s/ourapponkube.md
  #- k8s/exercise-wordsmith.md
- # 2
  - shared/yaml.md
  - k8s/labels-annotations.md
  - k8s/kubectl-logs.md
  - k8s/logs-cli.md
  - k8s/yamldeploy.md
  - k8s/namespaces.md
  - shared/declarative.md
  - k8s/declarative.md
  - k8s/deploymentslideshow.md
  - k8s/setup-overview.md
  - k8s/setup-devel.md
  #- k8s/setup-managed.md
  #- k8s/setup-selfhosted.md
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
  - k8s/kubectlproxy.md
  - exercises/yaml-dockercoins-details.md
  - exercises/localcluster-details.md
- # 3
  #- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
  - k8s/daemonset.md
  - k8s/rollout.md
  - k8s/healthchecks.md
  #- k8s/healthchecks-more.md
  - k8s/dashboard.md
  - k8s/k9s.md
  - k8s/tilt.md
  - exercises/healthchecks-details.md
- # 4
  - k8s/ingress.md
  #- k8s/ingress-tls.md
  #- k8s/ingress-advanced.md
  - k8s/volumes.md
  #- k8s/exercise-configmap.md
  #- k8s/build-with-docker.md
  #- k8s/build-with-kaniko.md
  - k8s/configuration.md
  - k8s/secrets.md
  - k8s/batch-jobs.md
  - shared/thankyou.md
slides/3.yml: 47 lines changed
@@ -1,47 +0,0 @@
title: |
  Packaging d'applications
  pour Kubernetes

chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2025-05-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- k8s/prereqs-advanced.md
- shared/handson.md
- shared/webssh.md
- shared/connecting.md
#- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- shared/toc.md
-
  - k8s/demo-apps.md
  - k8s/kustomize.md
  - k8s/helm-intro.md
  - k8s/helm-chart-format.md
  - k8s/helm-create-basic-chart.md
  - exercises/helm-generic-chart-details.md
-
  - k8s/helm-create-better-chart.md
  - k8s/helm-dependencies.md
  - k8s/helm-values-schema-validation.md
  - k8s/helm-secrets.md
  - exercises/helm-umbrella-chart-details.md
-
  - k8s/helmfile.md
  - k8s/ytt.md
  - k8s/gitworkflows.md
  - k8s/flux.md
  - k8s/argocd.md
  - shared/thankyou.md
slides/4.yml: 74 lines changed
@@ -1,74 +0,0 @@
title: |
  Kubernetes Avancé

chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2025-05-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom.md
- k8s/prereqs-advanced.md
- shared/handson.md
- shared/webssh.md
- shared/connecting.md
- shared/toc.md
- exercises/netpol-brief.md
- exercises/sealed-secrets-brief.md
- exercises/rbac-brief.md
- exercises/kyverno-ingress-domain-name-brief.md
- exercises/reqlim-brief.md
- #1
  - k8s/demo-apps.md
  - k8s/netpol.md
  - k8s/authn-authz.md
  - k8s/sealed-secrets.md
  - k8s/cert-manager.md
  - k8s/cainjector.md
  - k8s/ingress-tls.md
  - exercises/netpol-details.md
  - exercises/sealed-secrets-details.md
  - exercises/rbac-details.md
- #2
  - k8s/extending-api.md
  - k8s/crd.md
  - k8s/operators.md
  - k8s/admission.md
  - k8s/cainjector.md
  - k8s/kyverno.md
  - exercises/kyverno-ingress-domain-name-details.md
- #3
  - k8s/resource-limits.md
  - k8s/metrics-server.md
  - k8s/cluster-sizing.md
  - k8s/horizontal-pod-autoscaler.md
  - k8s/apiserver-deepdive.md
  - k8s/aggregation-layer.md
  - k8s/hpa-v2.md
  - exercises/reqlim-details.md
- #4
  - k8s/statefulsets.md
  - k8s/consul.md
  - k8s/pv-pvc-sc.md
  - k8s/volume-claim-templates.md
  #- k8s/eck.md
  #- k8s/portworx.md
  - k8s/openebs.md
  - k8s/stateful-failover.md
  - k8s/operators-design.md
  - k8s/operators-example.md
  - k8s/owners-and-dependents.md
  - k8s/events.md
  - k8s/finalizers.md
  - shared/thankyou.md
slides/5.yml: 71 lines changed
@@ -1,71 +0,0 @@
title: |
  Opérer Kubernetes

chat: "[Mattermost](https://training.enix.io/mattermost)"

gitrepo: github.com/jpetazzo/container.training

slides: https://2025-05-enix.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
# DAY 1
-
  - k8s/prereqs-advanced.md
  - shared/handson.md
  - k8s/architecture.md
  - k8s/deploymentslideshow.md
  - k8s/dmuc-easy.md
  - k8s/dmuc-medium.md
  - k8s/user-cert.md
  - k8s/control-plane-auth.md
  - k8s/staticpods.md
  - exercises/dmuc-auth-details.md
  - exercises/dmuc-networking-details.md
  - exercises/dmuc-staticpods-details.md
-
  - k8s/dmuc-hard.md
  - k8s/apilb.md
  - k8s/cni-internals.md
  - k8s/csr-api.md
  - k8s/openid-connect.md
  - k8s/pod-security-intro.md
  - k8s/pod-security-policies.md
  - k8s/pod-security-admission.md
  #- k8s/interco.md
  #- k8s/internal-apis.md
  - k8s/cluster-upgrade.md
  - k8s/cluster-backup.md
  #- k8s/cloud-controller-manager.md
-
  - k8s/M6-START-a-company-scenario.md
  - k8s/M6-T02-flux-install.md
  - k8s/M6-T03-installing-tenants.md
  - k8s/M6-R01-flux_configure-ROCKY-deployment.md
  - k8s/M6-T05-ingress-config.md
  - k8s/M6-M01-adding-MOVY-tenant.md
  - k8s/M6-K01-METAL-install.md
  - k8s/M6-K03-openebs-install.md
  - k8s/M6-monitoring-stack-install.md
  - k8s/M6-kyverno-install.md
  - shared/thankyou.md
#-
# |
#   # (Extra content)
#  - k8s/apiserver-deepdive.md
#  - k8s/setup-overview.md
#  - k8s/setup-devel.md
#  - k8s/setup-managed.md
#  - k8s/setup-selfhosted.md
@@ -2,6 +2,7 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /mq.yml.html 200!

# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

@@ -12,7 +13,7 @@
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY

# Shortlink for the QRCode
/q /qrcode.html 200
/q https://docs.google.com/forms/d/e/1FAIpQLScYloWur4uVhKgVNIdUrfHZ8pk_mBmPcQwmbhjK2FlR9KWDCA/viewform

# Shortlinks for next training in English and French
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428

@@ -21,7 +22,3 @@
/us https://www.ardanlabs.com/live-training-events/deploying-microservices-and-traditional-applications-with-kubernetes-march-28-2022.html
/uk https://skillsmatter.com/workshops/827-deploying-microservices-and-traditional-applications-with-kubernetes-with-jerome-petazzoni

# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform

/ /highfive.html 200!

@@ -32,7 +32,7 @@ Compose enables a simple, powerful onboarding workflow:
1. Checkout our code.

2. Run `docker compose up`.
2. Run `docker-compose up`.

3. Our app is up and running!

@@ -66,19 +66,19 @@ class: pic
1. Write Dockerfiles

2. Describe our stack of containers in a YAML file (the "Compose file")
2. Describe our stack of containers in a YAML file called `docker-compose.yml`

3. `docker compose up` (or `docker compose up -d` to run in the background)
3. `docker-compose up` (or `docker-compose up -d` to run in the background)

4. Compose pulls and builds the required images, and starts the containers

5. Compose shows the combined logs of all the containers

   (if running in the background, use `docker compose logs`)
   (if running in the background, use `docker-compose logs`)

6. Hit Ctrl-C to stop the whole stack

   (if running in the background, use `docker compose stop`)
   (if running in the background, use `docker-compose stop`)

---

@@ -86,11 +86,11 @@ class: pic
After making changes to our source code, we can:

1. `docker compose build` to rebuild container images
1. `docker-compose build` to rebuild container images

2. `docker compose up` to restart the stack with the new images
2. `docker-compose up` to restart the stack with the new images

We can also combine both with `docker compose up --build`
We can also combine both with `docker-compose up --build`

Compose will be smart, and only recreate the containers that have changed.

@@ -114,7 +114,7 @@ cd trainingwheels
Second step: start the app.

```bash
docker compose up
docker-compose up
```

Watch Compose build and run the app.
@@ -141,17 +141,7 @@ After ten seconds (or if we press `^C` again) it will forcibly kill them.
---

## The Compose file

* Historically: docker-compose.yml or .yaml

* Recently (kind of): can also be named compose.yml or .yaml

  (Since [version 1.28.6, March 2021](https://docs.docker.com/compose/releases/release-notes/#1286))

---

## Example
## The `docker-compose.yml` file

Here is the file used in the demo:

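(The file itself is unchanged, so it is elided from this diff. Judging from the `www` and `redis` container names appearing later in this chapter, it presumably resembles the following sketch, which is a reconstruction rather than the verbatim demo file:)

```yaml
version: "3"          # see the discussion of Compose file versions below
services:
  www:
    build: www        # assumes a local build context named "www"
    ports:
      - 8000:5000     # hypothetical port mapping
  redis:
    image: redis
```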
@@ -182,9 +172,9 @@ services:
A Compose file has multiple sections:

* `services` is mandatory. Each service corresponds to a container.
* `version` is mandatory. (Typically use "3".)

* `version` is optional (it used to be mandatory). It can be ignored.
* `services` is mandatory. Each service corresponds to a container.

* `networks` is optional and indicates to which networks containers should be connected.
  <br/>(By default, containers will be connected on a private, per-compose-file network.)
@@ -193,24 +183,24 @@ A Compose file has multiple sections:
---

class: extra-details

## Compose file versions

* Version 1 is legacy and shouldn't be used.

  (If you see a Compose file without a `services` block, it's a legacy v1 file.)
  (If you see a Compose file without `version` and `services`, it's a legacy v1 file.)

* Version 2 added support for networks and volumes.

* Version 3 added support for deployment options (scaling, rolling updates, etc).

* Typically use `version: "3"`.

The [Docker documentation](https://docs.docker.com/compose/compose-file/)
has excellent information about the Compose file format if you need to know more about versions.

---

## Containers in Compose file
## Containers in `docker-compose.yml`

Each service in the YAML file must contain either `build`, or `image`.

@@ -288,7 +278,7 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
`frontcopy_www`, `frontcopy_www_1`, `frontcopy_db_1`

- Alternatively, use `docker compose -p frontcopy`
- Alternatively, use `docker-compose -p frontcopy`

  (to set the `--project-name` of a stack, which defaults to the dir name)

@@ -298,10 +288,10 @@ For the full list, check: https://docs.docker.com/compose/compose-file/
## Checking stack status

We have `ps`, `docker ps`, and similarly, `docker compose ps`:
We have `ps`, `docker ps`, and similarly, `docker-compose ps`:

```bash
$ docker compose ps
$ docker-compose ps
Name                     Command             State   Ports
----------------------------------------------------------------------------
trainingwheels_redis_1   /entrypoint.sh red  Up      6379/tcp
@@ -320,13 +310,13 @@ If you have started your application in the background with Compose and
If you have started your application in the background with Compose and
want to stop it easily, you can use the `kill` command:

```bash
$ docker compose kill
$ docker-compose kill
```

Likewise, `docker compose rm` will let you remove containers (after confirmation):
Likewise, `docker-compose rm` will let you remove containers (after confirmation):

```bash
$ docker compose rm
$ docker-compose rm
Going to remove trainingwheels_redis_1, trainingwheels_www_1
Are you sure? [yN] y
Removing trainingwheels_redis_1...
@@ -337,19 +327,19 @@ Removing trainingwheels_www_1...
Removing trainingwheels_www_1...

## Cleaning up (2)

Alternatively, `docker compose down` will stop and remove containers.
Alternatively, `docker-compose down` will stop and remove containers.

It will also remove other resources, like networks that were created for the application.

```bash
$ docker compose down
$ docker-compose down
Stopping trainingwheels_www_1 ... done
Stopping trainingwheels_redis_1 ... done
Removing trainingwheels_www_1 ... done
Removing trainingwheels_redis_1 ... done
```

Use `docker compose down -v` to remove everything including volumes.
Use `docker-compose down -v` to remove everything including volumes.

---

@@ -379,15 +369,15 @@ Use `docker compose down -v` to remove everything including volumes.
- If the container is deleted, the volume gets orphaned

  - Example: `docker compose down && docker compose up`
  - Example: `docker-compose down && docker-compose up`

    - the old volume still exists, detached from its container

    - a new volume gets created

  - `docker compose down -v`/`--volumes` deletes volumes
  - `docker-compose down -v`/`--volumes` deletes volumes

    (but **not** `docker compose down && docker compose down -v`!)
    (but **not** `docker-compose down && docker-compose down -v`!)

---

@@ -406,9 +396,9 @@ volumes:
|
||||
|
||||
- Volume will be named `<project>_data`
|
||||
|
||||
- It won't be orphaned with `docker compose down`
|
||||
- It won't be orphaned with `docker-compose down`
|
||||
|
||||
- It will correctly be removed with `docker compose down -v`
|
||||
- It will correctly be removed with `docker-compose down -v`
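
The hunk above references a top-level `volumes:` section; for context, a minimal (hypothetical) Compose file using such a named volume could look like this (service and image names are illustrative):

```yaml
services:
  db:
    image: postgres:15
    volumes:
      - data:/var/lib/postgresql/data

volumes:
  data:     # named volume; will be created as <project>_data
```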

---

@@ -427,7 +417,7 @@ services:

(for migration, backups, disk usage accounting...)

- Won't be removed by `docker compose down -v`
- Won't be removed by `docker-compose down -v`
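
By contrast, an *external* volume is declared like this (a sketch; the volume name is illustrative, and the volume must already exist):

```yaml
volumes:
  data:
    external: true   # created beforehand, e.g. with `docker volume create data`
```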

---

@@ -461,7 +451,7 @@ services:

- This is used when bringing up individual services

  (e.g. `docker compose up blah` or `docker compose run foo`)
  (e.g. `docker-compose up blah` or `docker-compose run foo`)

⚠️ It doesn't make a service "wait" for another one to be up!
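
For context, a minimal (hypothetical) use of `depends_on` — service names are illustrative:

```yaml
services:
  web:
    build: .
    depends_on:
      - redis    # controls start order only; doesn't wait for redis to be *ready*
  redis:
    image: redis
```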

@@ -481,9 +471,7 @@ class: extra-details

- `docker compose` command to deploy Compose stacks to some clouds

- in Go instead of Python

- progressively getting feature parity with `docker compose`
- progressively getting feature parity with `docker-compose`

- also provides numerous improvements (e.g. leverages BuildKit by default)

@@ -1,32 +0,0 @@
# Exercise — enable auth

- We want to enable authentication and authorization

- Checklist:

  - non-privileged user can deploy in their namespace
    <br/>(and nowhere else)

  - each controller uses its own key, certificate, and identity

  - each node uses its own key, certificate, and identity

  - Service Accounts work properly

- See next slide for help / hints!

---

## Checklist

- Generate keys, certs, and kubeconfig for everything that needs them

  (cluster admin, cluster user, controller manager, scheduler, kubelet; see the sketch after this slide)

- Reconfigure and restart each component to use its new identity

- Turn on `RBAC` and `Node` authorizers on the API server

- Check that everything works properly

  (e.g. that you can create and scale a Deployment using the "cluster user" identity)
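
One way to tackle the first checklist item, sketched here for the "cluster user" identity (file names, the CA location, the group, and the API server address are placeholders, not the course's exact procedure):

```bash
# Generate a key and a CSR for the cluster user (group "devs" is illustrative).
openssl genrsa -out user.key 2048
openssl req -new -key user.key -subj "/CN=cluster-user/O=devs" -out user.csr

# Sign the CSR with the cluster CA (assumed to live in ca.crt / ca.key).
openssl x509 -req -in user.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
    -days 365 -out user.crt

# Wrap the new identity in a kubeconfig.
kubectl config set-cluster training --server=https://API_SERVER:6443 \
    --certificate-authority=ca.crt --embed-certs --kubeconfig=user.kubeconfig
kubectl config set-credentials cluster-user --client-certificate=user.crt \
    --client-key=user.key --embed-certs --kubeconfig=user.kubeconfig
kubectl config set-context default --cluster=training --user=cluster-user \
    --kubeconfig=user.kubeconfig
kubectl config use-context default --kubeconfig=user.kubeconfig
```

The same recipe repeats for the controller manager, scheduler, and kubelet (with the CNs those components expect).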

@@ -1,51 +0,0 @@
# Exercise — networking

- We want to install extra networking components:

  - a CNI configuration

  - kube-proxy

  - CoreDNS

- After doing that, we should be able to deploy a "complex" app

  (with multiple containers communicating together + service discovery)

---

## CNI

- Easy option: Weave

  https://github.com/weaveworks/weave/releases

- Better option: Cilium

  https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/#install-the-cilium-cli

  or https://docs.cilium.io/en/stable/installation/k8s-install-helm/#installation-using-helm

---

## kube-proxy

- Option 1: author a DaemonSet

- Option 2: leverage the CNI (some CNIs, like Cilium, can replace kube-proxy)

---

## CoreDNS

- Suggested method: Helm chart

  (available on https://github.com/coredns/helm; a minimal sketch follows)
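
Installing it with that chart could look like this (a sketch following the chart's README; the `clusterIP` value is an assumption and must match the kubelets' `--cluster-dns` setting):

```bash
helm repo add coredns https://coredns.github.io/helm
helm --namespace=kube-system install coredns coredns/coredns \
     --set service.clusterIP=10.96.0.10
```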

---

## Testing

- Try to deploy DockerCoins and confirm that it works

  (for instance with [this YAML file](https://raw.githubusercontent.com/jpetazzo/container.training/refs/heads/main/k8s/dockercoins.yaml))

@@ -1,22 +0,0 @@
# Exercise — static pods

- We want to run the control plane in static pods

  (etcd, API server, controller manager, scheduler)

- For Kubernetes components, we can use [these images](https://kubernetes.io/releases/download/#container-images)

- For etcd, we can use [this image](https://quay.io/repository/coreos/etcd?tab=tags)

- If we're using keys, certificates... we can use [hostPath volumes](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)

---

## Testing

After authoring our static pod manifests and placing them in the right directory,
we should be able to start our cluster simply by starting kubelet.

(Assuming that the container engine is already running.)

For bonus points: write and enable a systemd unit for kubelet! (A sketch follows.)
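
Such a unit could look like this (a minimal sketch: the binary path, flags, and engine dependency are assumptions to adapt to your setup):

```bash
# Write a minimal kubelet unit, then enable and start it.
cat > /etc/systemd/system/kubelet.service <<'EOF'
[Unit]
Description=kubelet
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet \
    --pod-manifest-path=/etc/kubernetes/manifests
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kubelet
```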

@@ -26,7 +26,7 @@

- it should initially show a few milliseconds latency

- that will increase when we scale up the number of `worker` Pods
- that will increase when we scale up

- it will also let us detect when the service goes "boom"

@@ -26,8 +26,8 @@ When a Service gets created...

- We want to use a Kyverno `generate` ClusterPolicy

- For step 1, check the [Generate Resources](https://kyverno.io/docs/policy-types/cluster-policy/generate/) documentation
- For step 1, check the [Generate Resources](https://kyverno.io/docs/writing-policies/generate/) documentation

- For step 2, check the [Preconditions](https://kyverno.io/docs/policy-types/cluster-policy/preconditions/) documentation
- For step 2, check the [Preconditions](https://kyverno.io/docs/writing-policies/preconditions/) documentation

- For step 3, check the [External Data Sources](https://kyverno.io/docs/policy-types/cluster-policy/external-data-sources/) documentation
- For step 3, check the [External Data Sources](https://kyverno.io/docs/writing-policies/external-data-sources/) documentation
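
To give an idea of the shape of such a policy, here is a sketch combining a `generate` rule with a precondition (the generated ConfigMap is purely illustrative, not the exercise's solution):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: on-service-creation
spec:
  rules:
  - name: generate-info-configmap
    match:
      any:
      - resources:
          kinds: [ Service ]
    preconditions:
      all:
      - key: "{{ request.operation }}"
        operator: Equals
        value: CREATE
    generate:
      apiVersion: v1
      kind: ConfigMap
      name: "{{ request.object.metadata.name }}-info"
      namespace: "{{ request.object.metadata.namespace }}"
      data:
        data:             # body of the generated ConfigMap
          createdFor: "{{ request.object.metadata.name }}"
```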

@@ -1,51 +0,0 @@
# Exercise — Monokube static pods

- We want to run a very basic Kubernetes cluster by starting only:

  - kubelet

  - a container engine (e.g. Docker)

- The other components (control plane and otherwise) should be started with:

  - static pods

  - "classic" manifests loaded with e.g. `kubectl apply`

- This should be done with the "monokube" VM

  (which has Docker and kubelet 1.19 binaries available)

---

## Images to use

Here are some image suggestions:

- etcd → `quay.io/coreos/etcd:vX.Y.Z`

- Kubernetes components → `registry.k8s.io/kube-XXX:vX.Y.Z`

  (where `XXX` = `apiserver`, `scheduler`, `controller-manager`)

To know which versions to use, check the versions of the binaries installed on the `monokube` VM, and use the same ones.

See next slide for more hints!

---

## Inventory

We'll need to run:

- kubelet (with the flag for static pod manifests)

- Docker

- static pods for control plane components

  (suggestion: use `hostNetwork`; see the sketch below)

- static pod or DaemonSet for `kube-proxy`

  (will require a privileged security context)
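
As a starting point, a static pod manifest for etcd could look like this (a sketch: the image tag, data directory, and manifest directory are assumptions to match the VM):

```yaml
# Place this file in the kubelet's static pod directory,
# e.g. /etc/kubernetes/manifests/etcd.yaml.
apiVersion: v1
kind: Pod
metadata:
  name: etcd
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: etcd
    image: quay.io/coreos/etcd:v3.4.13
    command: [ "etcd", "--data-dir=/var/lib/etcd" ]
    volumeMounts:
    - name: data
      mountPath: /var/lib/etcd
  volumes:
  - name: data
    hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
```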

@@ -1,86 +0,0 @@
# Exercise — Writing blue/green YAML

- We want to author YAML manifests for the "color" app

  (use image `jpetazzo/color` or `ghcr.io/jpetazzo/color`)

- That app serves web requests on port 80

- We want to deploy two instances of that app (`blue` and `green`)

- We want to expose the app with a service named `front`, such that:

  90% of the requests are sent to `blue`, and 10% to `green`

---

## End goal

- We want to be able to do something like:
  ```bash
  kubectl apply -f blue-green-demo.yaml
  ```

- Then connect to the `front` service and see responses from `blue` and `green`

- Then measure, e.g. over 100 requests, how many go to `blue` and `green`

  (we want a 90/10 traffic split)

- Go ahead, or check the next slides for hints!

---

## Step 1

- Test the app in isolation:

  - create a Deployment called `blue`

  - expose it with a Service

  - connect to the service and see a "blue" reply

- If you use a `ClusterIP` service:

  - if you're logged directly on the cluster, you can connect directly

  - otherwise, you can use `kubectl port-forward`

- Otherwise, you can use a `NodePort` or `LoadBalancer` service

---

## Step 2

- Add the `green` Deployment

- Create the `front` service

- Edit the `front` service to replace its selector with a custom one

- Edit `blue` and `green` to add the label(s) of your custom selector

- Check that traffic hits both green and blue

- Think about how to obtain the 90/10 traffic split

  (see the sketch below for one way to reason about it)
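
One classic approach: give both Deployments a shared label, point `front` at that label, and let the replica counts set the ratio. This is a sketch — the `app: color` label is an illustrative choice, and the 90/10 split comes from running 9 `blue` replicas and 1 `green` replica, since a ClusterIP service load-balances roughly evenly across ready endpoints:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: front
spec:
  selector:
    app: color        # shared label carried by the pods of *both* Deployments
  ports:
  - port: 80
```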

---

## Step 3

- Generate, write, extract... YAML manifests for all components

  (`blue` and `green` Deployments, `front` Service)

- Check that applying the manifests (e.g. in a brand new namespace) works

- Bonus points: add a one-shot pod to check the traffic split!

---

## Discussion

- Would this be a viable option to obtain, say, a 95% / 5% traffic split?

- What about 99% / 1%?

@@ -1,135 +0,0 @@
<?xml version="1.0"?>
<html>
  <head>
    <style>
      td {
        background: #ccc;
        padding: 1em;
      }
    </style>
  </head>
  <body>
    <table>
      <tr><td>Mardi 13 mai 2025</td><td><a href="1.yml.html">Docker Intensif</a></td></tr>
      <tr><td>Mercredi 14 mai 2025</td><td><a href="1.yml.html">Docker Intensif</a></td></tr>
      <tr><td>Jeudi 15 mai 2025</td><td><a href="1.yml.html">Docker Intensif</a></td></tr>
      <tr><td>Vendredi 16 mai 2025</td><td><a href="1.yml.html">Docker Intensif</a></td></tr>

      <tr><td>Mardi 20 mai 2025</td><td><a href="2.yml.html">Fondamentaux Kubernetes</a></td></tr>
      <tr><td>Mercredi 21 mai 2025</td><td><a href="2.yml.html">Fondamentaux Kubernetes</a></td></tr>
      <tr><td>Jeudi 22 mai 2025</td><td><a href="2.yml.html">Fondamentaux Kubernetes</a></td></tr>
      <tr><td>Vendredi 23 mai 2025</td><td><a href="2.yml.html">Fondamentaux Kubernetes</a></td></tr>

      <tr><td>Lundi 26 mai 2025</td><td><a href="3.yml.html">Packaging d'applications pour Kubernetes</a></td></tr>
      <tr><td>Mardi 27 mai 2025</td><td><a href="3.yml.html">Packaging d'applications pour Kubernetes</a></td></tr>
      <tr><td>Mercredi 28 mai 2025</td><td><a href="3.yml.html">Packaging d'applications pour Kubernetes</a></td></tr>

      <tr><td>Lundi 2 juin 2025</td><td><a href="4.yml.html">Kubernetes Avancé</a></td></tr>
      <tr><td>Mardi 3 juin 2025</td><td><a href="4.yml.html">Kubernetes Avancé</a></td></tr>
      <tr><td>Mercredi 4 juin 2025</td><td><a href="4.yml.html">Kubernetes Avancé</a></td></tr>
      <tr><td>Jeudi 5 juin 2025</td><td><a href="4.yml.html">Kubernetes Avancé</a></td></tr>

      <tr><td>Mardi 10 juin 2025</td><td><a href="5.yml.html">Opérer Kubernetes</a></td></tr>
      <tr><td>Mercredi 11 juin 2025</td><td><a href="5.yml.html">Opérer Kubernetes</a></td></tr>
      <tr><td>Jeudi 12 juin 2025</td><td><a href="5.yml.html">Opérer Kubernetes</a></td></tr>
      <tr><td>Vendredi 13 juin 2025</td><td><a href="5.yml.html">Opérer Kubernetes</a></td></tr>
    </table>
  </body>
</html>

[17 deleted image files — Before sizes: 74, 73, 186, 34, 221, 69, 162, 570, 278, 347, 192, 35, 71, 70, 241, 189, and 29 KiB]

@@ -1,72 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
-
#- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
#- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
#- containers/Start_And_Attach.md
- containers/Naming_And_Inspecting.md
#- containers/Labels.md
- containers/Getting_Inside.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Container_Networking_Basics.md
#- containers/Network_Drivers.md
- containers/Local_Development_Workflow.md
- containers/Container_Network_Model.md
- shared/yaml.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Multi_Stage_Builds.md
#- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
#- containers/Docker_Machine.md
#- containers/Advanced_Dockerfiles.md
#- containers/Buildkit.md
#- containers/Init_Systems.md
#- containers/Application_Configuration.md
#- containers/Logging.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Container_Engines.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
#- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
@@ -1,73 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person

content:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - containers/Docker_Overview.md
  - containers/Docker_History.md
  - containers/Training_Environment.md
  - containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  - containers/Start_And_Attach.md
- - containers/Initial_Images.md
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
  - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
  - containers/Publishing_To_Docker_Hub.md
  - containers/Dockerfile_Tips.md
  - containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
  - containers/Labels.md
  - containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
  - containers/Network_Drivers.md
  - containers/Container_Network_Model.md
  #- containers/Connecting_Containers_With_Links.md
  - containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
  - containers/Windows_Containers.md
  - containers/Working_With_Volumes.md
  - shared/yaml.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
  - containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
  - containers/Buildkit.md
  - containers/Init_Systems.md
  - containers/Application_Configuration.md
  - containers/Logging.md
  - containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
  - containers/Copy_On_Write.md
  #- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
  - containers/Pods_Anatomy.md
  - containers/Ecosystem.md
  - containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
@@ -1,81 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

content:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- # DAY 1
- containers/Docker_Overview.md
#- containers/Docker_History.md
- containers/Training_Environment.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Initial_Images.md
-
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
-
- containers/Dockerfile_Tips.md
- containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Exercise_Dockerfile_Advanced.md
-
- containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Start_And_Attach.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
- # DAY 2
- containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
-
- containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- shared/yaml.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
-
- containers/Installing_Docker.md
- containers/Container_Engines.md
- containers/Init_Systems.md
- containers/Advanced_Dockerfiles.md
- containers/Buildkit.md
-
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Orchestration_Overview.md
-
- shared/thankyou.md
- containers/links.md
#-
#- containers/Docker_Machine.md
#- containers/Ambassadors.md
#- containers/Namespaces_Cgroups.md
#- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
#- containers/Pods_Anatomy.md
#- containers/Ecosystem.md
@@ -1,349 +0,0 @@
# K01- Installing a Kubernetes cluster from scratch

We operated a managed cluster from **Scaleway** `Kapsule`.

It's great! Most batteries are included:

- storage classes, with an already configured default one
- a default CNI with `Cilium`
  <br/>(`Calico` is supported too)
- an _IaaS_ load-balancer that can be managed by `ingress-controllers`
- a management _WebUI_ with the Kubernetes dashboard
- an observability stack with `metrics-server` and the Kubernetes dashboard

But what about _on premises_ needs?

---

class: extra-details

## On premises Kubernetes distributions

The [CNCF landscape](https://landscape.cncf.io/?fullscreen=yes&zoom=200&group=certified-partners-and-providers) currently lists **61** (!) Kubernetes distributions —
and that's not counting the managed Kubernetes services from Cloud providers…

Please refer to the [`Setting up Kubernetes` chapter in the High Five M2 module](./2.yml.html#toc-setting-up-kubernetes) for more info about Kubernetes distributions.

---

## Introducing k0s

Nowadays, some "light" distros are considered good enough to run production clusters.
That's the case for `k0s`.

It's an open source, lightweight Kubernetes distribution,
backed mainly by **Mirantis**, a long-time software vendor in the Kubernetes ecosystem.
(The ones who bought `Docker Enterprise` a long time ago, remember?)

`k0s` aims to be both:

- a lightweight distribution for _edge-computing_ and development purposes
- an enterprise-grade HA distribution fully supported by its editor
  <br/>(`MKE4` and `kordent` build on `k0s`)

---

### `k0s` package

Its single binary includes:

- a CRI (`containerd`)
- vanilla Kubernetes control plane components (including `etcd`)
- a vanilla network stack:
  - `kube-router`
  - `kube-proxy`
  - `coredns`
  - `konnectivity`
- the `kubectl` CLI
- install / uninstall features
- backup / restore features

---

class: pic

![]()

---

class: extra-details

### Konnectivity

You've seen that Kubernetes cluster architecture is very versatile.
(See the [`Kubernetes architecture` chapter in the High Five M5 module](./5.yml.html#toc-kubernetes-architecture).)

Network communications between control plane components and worker nodes can be awkward to configure.
`Konnectivity` is a response to this pain: it acts as an RPC proxy for any communication initiated from the control plane to the workers.

These communications are listed in the [`Kubernetes internal APIs` chapter in the High Five M5 module](https://2025-01-enix.container.training/5.yml.html#toc-kubernetes-internal-apis).

The agent deployed on each worker node maintains an RPC tunnel with the one deployed on the control plane side.

---

class: pic

![]()

---

## Installing `k0s`

It installs with a one-liner command:

- either with a single-node, lightweight footprint
- or with a multi-node HA footprint

.lab[

- Get the binary

  ```bash
  docker@m621: ~$ wget https://github.com/k0sproject/k0sctl/releases/download/v0.25.1/k0sctl-linux-amd64
  ```

]

---

### Prepare the config file

.lab[

- Create the config file

  ```bash
  docker@m621: ~$ k0sctl init \
      --controller-count 3 \
      --user docker \
      --k0s m621 m622 m623 > k0sctl.yaml
  ```

- change the following field: `spec.hosts[*].role: controller+worker`
- add the following field: `spec.hosts[*].noTaints: true`

  (see the sketch after this lab)

  ```bash
  docker@m621: ~$ k0sctl apply --config k0sctl.yaml
  ```

]
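
The resulting `k0sctl.yaml` would then contain host entries along these lines (a hedged sketch — only the fields relevant to the edits above are shown, and the exact layout may differ between k0sctl versions):

```yaml
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
spec:
  hosts:
  - ssh:
      address: m621
      user: docker
    role: controller+worker   # each node runs the control plane *and* workloads
    noTaints: true            # don't taint the controller+worker nodes
  # ...same for m622 and m623
```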

---

### And the famous one-liner

.lab[

```bash
k8s@shpod: ~$ k0sctl apply --config k0sctl.yaml
```

]

---

### Check that k0s installed correctly

.lab[

```bash
docker@m621 ~$ sudo k0s status
Version: v1.33.1+k0s.1
Process ID: 60183
Role: controller
Workloads: true
SingleNode: false
Kube-api probing successful: true
Kube-api probing last error:

docker@m621 ~$ sudo k0s etcd member-list
{"members":{"m621":"https://10.10.3.190:2380","m622":"https://10.10.2.92:2380","m623":"https://10.10.2.110:2380"}}
```

]

---

### `kubectl` is included

.lab[

```bash
docker@m621 ~$ sudo k0s kubectl get nodes
NAME   STATUS   ROLES           AGE   VERSION
m621   Ready    control-plane   66m   v1.33.1+k0s
m622   Ready    control-plane   66m   v1.33.1+k0s
m623   Ready    control-plane   66m   v1.33.1+k0s

docker@m621 ~$ sudo k0s kubectl run shpod --image jpetazzo/shpod
```

]

---

class: extra-details

### Single node install (for info!)

For testing purposes, you may want to use a single-node (yet `etcd`-geared) install…

.lab[

- Install it

  ```bash
  docker@m621 ~$ curl -sSLf https://get.k0s.sh | sudo sh
  docker@m621 ~$ sudo k0s install controller --single
  docker@m621 ~$ sudo k0s start
  ```

- Reset it

  ```bash
  docker@m621 ~$ sudo k0s stop
  docker@m621 ~$ sudo k0s reset
  ```

]

---

## Deploying shpod

.lab[

```bash
docker@m621 ~$ sudo k0s kubectl apply -f https://shpod.in/shpod.yaml
```

]

---

## Flux install

We'll install `Flux`,
and replay the whole scenario a second time.
Let's face it: we don't have that much time. 😅

Since all our install and configuration is `GitOps`-based, we might just leverage copy-paste and configuration-as-code…
Maybe.

Let's copy the 📂 `./clusters/CLOUDY` folder and rename it 📂 `./clusters/METAL`.

---

### Modifying Flux config 📄 files

- In 📄 file `./clusters/METAL/flux-system/gotk-sync.yaml`,
  <br/>change the `Kustomization` value `spec.path: ./clusters/METAL`
- ⚠️ We'll have to adapt the `Flux` _CLI_ command line

- And that's pretty much it!
- We'll see if anything goes wrong on that new cluster

---

### Connecting to our dedicated `Github` repo to host Flux config

.lab[

- let's replace the `GITHUB_TOKEN` and `GITHUB_REPO` values
- don't forget to change the path to `clusters/METAL`

```bash
k8s@shpod:~$ export GITHUB_TOKEN="my-token" && \
  export GITHUB_USER="container-training-fleet" && \
  export GITHUB_REPO="fleet-config-using-flux-XXXXX"

k8s@shpod:~$ flux bootstrap github \
  --owner=${GITHUB_USER} \
  --repository=${GITHUB_REPO} \
  --team=OPS \
  --team=ROCKY --team=MOVY \
  --path=clusters/METAL
```
]

---

class: pic

![]()

---

### Flux deployed our complete stack

Everything seems to be here, but…

- one database is in `Pending` state

- our `ingresses` don't work well

```bash
k8s@shpod ~$ curl --header 'Host: rocky.test.enixdomain.com' http://${myIngressControllerSvcIP}
curl: (52) Empty reply from server
```

---

### Fixing the Ingress

The current `ingress-nginx` configuration relies on specific annotations used by Scaleway to bind an _IaaS_ load-balancer to the `ingress-controller`.
We don't have that kind of thing here. 😕

- We could bind our `ingress-controller` to a `NodePort`.
  The `ingress-nginx` install manifests propose it here:
  <br/>https://github.com/kubernetes/ingress-nginx/tree/main/deploy/static/provider/baremetal

- In the 📄 file `./clusters/METAL/ingress-nginx/sync.yaml`,
  <br/>change the `Kustomization` value `spec.path: ./deploy/static/provider/baremetal`

---

class: pic

![]()

---

### Troubleshooting the database

One of our `db-0` pods is in the `Pending` state.

```bash
k8s@shpod ~$ k get pods db-0 -n *-test -oyaml
(…)
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2025-06-11T11:15:42Z"
    message: '0/3 nodes are available: pod has unbound immediate PersistentVolumeClaims.
      preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling.'
    reason: Unschedulable
    status: "False"
    type: PodScheduled
  phase: Pending
  qosClass: Burstable
```

---

### Troubleshooting the PersistentVolumeClaims

```bash
k8s@shpod ~$ k get pvc postgresql-data-db-0 -n *-test -o yaml
(…)
Type     Reason          Age                 From                          Message
----     ------          ----                ----                          -------
Normal   FailedBinding   9s (x182 over 45m)  persistentvolume-controller   no persistent volumes available for this claim and no storage class is set
```

No `storage class` is available on this cluster.
We didn't have this problem on our managed cluster, since a default storage class was configured and automatically associated with our `PersistentVolumeClaim`.

Why is there no problem with the other database?
@@ -1,129 +0,0 @@
# K03- Installing OpenEBS as our CSI

`OpenEBS` is a _CSI_ solution capable of hyperconvergence, synchronous replication, and other features.
It installs with `Helm` charts.

- `Flux` is able to watch `Helm` repositories and install `HelmReleases`
- To inject its configuration into the `Helm` chart, `Flux` relies on a `ConfigMap` including the `values.yaml` file

.lab[

```bash
k8s@shpod ~$ mkdir -p ./clusters/METAL/openebs/ && \
  cp -pr ~/container.training/k8s/M6-openebs-*.yaml \
    ./clusters/METAL/openebs/ && \
  cd ./clusters/METAL/openebs/ && \
  mv M6-openebs-kustomization.yaml kustomization.yaml && \
  cd -
```

]

---

## Creating a `Helm` source in Flux for the OpenEBS Helm chart

.lab[

```bash
k8s@shpod ~$ flux create source helm openebs \
  --url=https://openebs.github.io/openebs \
  --interval=3m \
  --export > ./clusters/METAL/openebs/sync.yaml
```

]

---

## Creating the `HelmRelease` in Flux

.lab[

```bash
k8s@shpod ~$ flux create helmrelease openebs \
  --namespace=openebs \
  --source=HelmRepository/openebs.flux-system \
  --chart=openebs \
  --values-from=ConfigMap/openebs-values \
  --export >> ./clusters/METAL/openebs/sync.yaml
```

]

---

## 📂 Let's review the files

- `M6-openebs-components.yaml`
  <br/>To include the `Flux` resources in the same _namespace_ where `Flux` installs the `OpenEBS` resources, we need to create the _namespace_ **before** the installation occurs

- `sync.yaml`
  <br/>The resources `Flux` uses to watch and get the `Helm` chart

- `M6-openebs-values.yaml`
  <br/>the `values.yaml` file that will be injected into the `Helm` chart

- `kustomization.yaml`
  <br/>This one is a bit special: it includes a [ConfigMap generator](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/configmapgenerator/) (see the sketch below)

- `M6-openebs-kustomizeconfig.yaml`
  <br/>This one is tricky: in order for `Flux` to trigger an upgrade of the `Helm Release` when the `ConfigMap` is altered, you need to explain to the `Kustomize ConfigMap generator` how the resources relate to each other. 🤯
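
Such a `kustomization.yaml` could look roughly like this (a hedged sketch — file names follow this lab's conventions; the generator pattern itself is the usual one from the Flux documentation):

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: openebs
resources:
- M6-openebs-components.yaml
- sync.yaml
configMapGenerator:
- name: openebs-values
  files:
  - values.yaml=M6-openebs-values.yaml   # becomes the chart's values
configurations:
- M6-openebs-kustomizeconfig.yaml        # teaches the generator the name references
```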

And here we go!

---

class: pic

![]()

---

## And the result

Now, we have a _cluster_ featuring `openEBS`.
But still… the PersistentVolumeClaim remains in `Pending` state! 😭

```bash
k8s@shpod ~$ kubectl get storageclass
NAME               PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
openebs-hostpath   openebs.io/local   Delete          WaitForFirstConsumer   false                  82m
```

We still don't have a default `StorageClass`! 😤

---

### Manually enforcing the default `StorageClass`

Even though Flux is constantly reconciling our resources, we are still able to test changes by hand.

.lab[

```bash
k8s@shpod ~$ flux suspend helmrelease openebs -n openebs
► suspending helmrelease openebs in openebs namespace
✔ helmrelease suspended
k8s@shpod ~$ kubectl patch storageclass openebs-hostpath \
  -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

k8s@shpod ~$ k get storageclass
NAME                         PROVISIONER        RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
openebs-hostpath (default)   openebs.io/local   Delete          WaitForFirstConsumer   false                  82m
```

]

---

### Now the database is OK

```bash
k8s@shpod ~$ k get pvc,pods -n movy-test
NAME                                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       VOLUMEATTRIBUTESCLASS   AGE
persistentvolumeclaim/postgresql-data-db-0   Bound    pvc-ede1634f-2478-42cd-8ee3-7547cd7cdde2   1Gi        RWO            openebs-hostpath   <unset>                 20m

NAME         READY   STATUS    RESTARTS   AGE
pod/db-0     1/1     Running   0          5h43m
(…)
```
@@ -1,320 +0,0 @@
|
||||
# M01- Configuring **_🎬MOVY_** deployment with Flux
|
||||
|
||||
**_🎸ROCKY_** _tenant_ is now fully usable in **_⚗️TEST_** env, let's do the same for another _dev_ team: **_🎬MOVY_**
|
||||
|
||||
😈 We could do it by using `Flux` _CLI_,
|
||||
but let's see if we can succeed by just adding manifests in our `Flux` configuration repository.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Impact study
|
||||
|
||||
In our `Flux` configuration repository:
|
||||
|
||||
- Creation of the following 📂 folders: `./tenants/[base|test]/MOVY`
|
||||
|
||||
- Modification of the following 📄 file: `./clusters/CLOUDY/tenants.yaml`?
|
||||
- Well, we don't need to: the watched path include the whole `./tenants/[test]/*` folder
|
||||
|
||||
In the app repository:
|
||||
|
||||
- Creation of a `movy` branch to deploy another version of the app dedicated to movie soundtracks
|
||||
|
||||
---
|
||||
|
||||
### Creation of the 📂 folders
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
cp -pr tenants/base/rocky tenants/base/movy
|
||||
cp -pr tenants/test/rocky tenants/test/movy
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Modification of tenants/[base|test]/movy/* 📄 files
|
||||
|
||||
- For 📄`M6-rocky-*.yaml`, change the file names…
|
||||
- and update the 📄`kustomization.yaml` file as a result
|
||||
|
||||
- In any file, replace any `rocky` entry by `movy`
|
||||
|
||||
- In 📄 `sync.yaml` be aware of what repository and what branch you want `Flux` to watch for **_🎬MOVY_** app deployment.
|
||||
- for this demo, let's assume we create a `movy` branch
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### What about reusing rocky-cluster-roles?
|
||||
|
||||
💡 In 📄`M6-movy-cluster-role.yaml` and 📄`rbac.yaml`, we could have reused the already existing `ClusterRoles`: `rocky-full-access`, and `rocky-pv-access`
|
||||
|
||||
A `ClusterRole` is cluster wide. It is not dedicated to a namespace.
|
||||
- Its permissions are restrained to a specific namespace by being bound to a `ServiceAccount` by a `RoleBinding`.
|
||||
- Whereas a `ClusterRoleBinding` extends the permissions to the whole cluster scope.
|
||||
|
||||
But a _tenant_ is a **_tenant_** and permissions might evolved separately for **_🎸ROCKY_** and **_🎬MOVY_**.
|
||||
|
||||
So [we got to keep'em separated](https://www.youtube.com/watch?v=GHUql3OC_uU).
|
||||
|
||||
---
|
||||
|
||||
### Let-su-go!
|
||||
|
||||
The **_⚙️OPS_** team push this new tenant configuration to `Github` for `Flux` controllers to watch and catch it!
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
git add . && \
|
||||
git commit -m':wrench: :construction_worker: add MOVY tenant configuration' && \
|
||||
git push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Another Flux error?
|
||||
|
||||
.lab[
|
||||
|
||||
- It seems that our `movy` branch is not present in the app repository
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ flux get kustomization -A
|
||||
NAMESPACE NAME REVISION SUSPENDED MESSAGE
|
||||
(…)
|
||||
flux-system tenant-prod False False kustomization path not found: stat /tmp/kustomization-113582828/tenants/prod: no such file or directory
|
||||
(…)
|
||||
movy-test movy False False Source artifact not found, retrying in 30s
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Creating the `movy` branch
|
||||
|
||||
- Let's create this new `movy` branch from `rocky` branch
|
||||
|
||||
.lab[
|
||||
|
||||
- You can force immediate reconciliation by typing this command:
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ flux reconcile source git movy-app -n movy-test
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### New branch detected
|
||||
|
||||
You now have a second app responding on [http://movy.test.mybestdomain.com]
|
||||
But as of now, it's just the same as the **_🎸ROCKY_** one.
|
||||
|
||||
We want a specific (pink-colored) version with a dataset full of movie soundtracks.
|
||||
|
||||
---
|
||||
|
||||
## New version of the **_🎬MOVY_** app
|
||||
|
||||
In our branch `movy`…
|
||||
Let's modify our `deployment.yaml` file with 2 modifications.
|
||||
|
||||
- in `spec.template.spec.containers.image` change the container image tag to `1.0.3`
|
||||
|
||||
- and… let's introduce some evil enthropy by changing this line… 😈😈😈
|
||||
|
||||
```yaml
|
||||
value: jdbc:postgresql://db/music
|
||||
```
|
||||
|
||||
by this one
|
||||
|
||||
```yaml
|
||||
value: jdbc:postgresql://db.rocky-test/music
|
||||
```
|
||||
|
||||
And push the modifications…
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### MOVY app is connected to ROCKY database
|
||||
|
||||
How evil have we been! 😈
|
||||
We connected the **_🎬MOVY_** app to the **_🎸ROCKY_** database.
|
||||
|
||||
Even if our tenants are isolated in how they manage their Kubernetes resources…
|
||||
pod network is still full mesh and any connection is authorized.
|
||||
|
||||
> The **_⚙️OPS_** team should fix this!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Adding NetworkPolicies to **_🎸ROCKY_** and **_🎬MOVY_** namespaces
|
||||
|
||||
`Network policies` may be seen as the firewall feature in the pod network.
|
||||
They rules ingress and egress network connections considering a described subset of pods.
|
||||
|
||||
Please, refer to the [`Network policies` chapter in the High Five M4 module](./4.yml.html#toc-network-policies)
|
||||
|
||||
- In our case, we just add the file `~/container.training/k8s/M6-network-policies.yaml`
|
||||
</br>in our `./tenants/base/movy` folder
|
||||
|
||||
- without forgetting to update our `kustomization.yaml` file
|
||||
|
||||
- and without forgetting to commit 😁
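
For the general idea, a namespace-isolation policy often looks like this (a hedged sketch — the actual `M6-network-policies.yaml` may differ):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector: {}          # applies to every pod in the namespace
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector: {}      # ...but only allows pods from the same namespace
```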

---

class: pic

![]()

---

### 🗺️ Where are we in our scenario?

<pre class="mermaid">
%%{init:
  {
    "theme": "default",
    "gitGraph": {
      "mainBranchName": "OPS",
      "mainBranchOrder": 0
    }
  }
}%%
gitGraph
  commit id:"0" tag:"start"
  branch ROCKY order:3
  branch MOVY order:4
  branch YouRHere order:5

  checkout OPS
  commit id:'Flux install on CLOUDY cluster' tag:'T01'
  branch TEST-env order:1
  commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT

  checkout OPS
  commit id:'Flux config. for TEST tenant' tag:'T03'
  commit id:'namespace isolation by RBAC'
  checkout TEST-env
  merge OPS id:'ROCKY tenant creation' tag:'T04'

  checkout OPS
  commit id:'ROCKY deploy. config.' tag:'R01'

  checkout TEST-env
  merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'

  checkout ROCKY
  commit id:'ROCKY' tag:'v1.0.0'

  checkout TEST-env
  merge ROCKY tag:'ROCKY v1.0.0'

  checkout OPS
  commit id:'Ingress-controller config.' tag:'T05'
  checkout TEST-env
  merge OPS id:'Ingress-controller install' type: HIGHLIGHT tag:'T06'

  checkout OPS
  commit id:'ROCKY patch for ingress config.' tag:'R03'
  checkout TEST-env
  merge OPS id:'ingress config. for ROCKY app'

  checkout ROCKY
  commit id:'blue color' tag:'v1.0.1'
  checkout TEST-env
  merge ROCKY tag:'ROCKY v1.0.1'

  checkout ROCKY
  commit id:'pink color' tag:'v1.0.2'
  checkout TEST-env
  merge ROCKY tag:'ROCKY v1.0.2'

  checkout OPS
  commit id:'FLUX config for MOVY deployment' tag:'M01'
  checkout TEST-env
  merge OPS id:'FLUX ready to deploy MOVY' type: HIGHLIGHT tag:'M02'

  checkout MOVY
  commit id:'MOVY' tag:'v1.0.3'
  checkout TEST-env
  merge MOVY tag:'MOVY v1.0.3' type: REVERSE

  checkout OPS
  commit id:'Network policies'
  checkout TEST-env
  merge OPS type: HIGHLIGHT

  checkout YouRHere
  commit id:'x'
  checkout OPS
  merge YouRHere id:'YOU ARE HERE'

  checkout OPS
  commit id:'k0s install on METAL cluster' tag:'K01'
  commit id:'Flux config. for METAL cluster' tag:'K02'
  branch METAL_TEST-PROD order:3
  commit id:'ROCKY/MOVY tenants on METAL' type: HIGHLIGHT
  checkout OPS
  commit id:'Flux config. for OpenEBS' tag:'K03'
  checkout METAL_TEST-PROD
  merge OPS id:'openEBS on METAL' type: HIGHLIGHT

  checkout OPS
  commit id:'Prometheus install'
  checkout TEST-env
  merge OPS type: HIGHLIGHT

  checkout OPS
  commit id:'Kyverno install'
  commit id:'Kyverno rules'
  checkout TEST-env
  merge OPS type: HIGHLIGHT
</pre>

@@ -1,417 +0,0 @@
# R01- Configuring **_🎸ROCKY_** deployment with Flux

The **_⚙️OPS_** team manages 2 distinct envs: **_⚗️TEST_** and _**🚜PROD**_

Thanks to _Kustomize_:
1. it creates a **_base_** common config
2. this common config is overlaid with a **_⚗️TEST_** _tenant_-specific configuration
3. the same applies with a _**🚜PROD**_-specific configuration

> 💡 This seems complex, but no worries: Flux's CLI handles most of it.

---

## Creating the **_🎸ROCKY_**-dedicated _tenant_ in the **_⚗️TEST_** env

- Using the `flux` _CLI_, we create the file configuring the **_🎸ROCKY_** team's dedicated _tenant_…
- … this file lives in the `base` configuration common to both envs

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
  mkdir -p ./tenants/base/rocky && \
  flux create tenant rocky \
    --with-namespace=rocky-test \
    --cluster-role=rocky-full-access \
    --export > ./tenants/base/rocky/rbac.yaml
```

]

---

class: extra-details

### 📂 ./tenants/base/rocky/rbac.yaml

Let's see our file…

3 resources are created: `Namespace`, `ServiceAccount`, and `ClusterRoleBinding`

`Flux` **impersonates** this `ServiceAccount` when it applies any resource found in this _tenant_'s dedicated source(s)

- By default, the `ServiceAccount` is bound to the `cluster-admin` `ClusterRole`
- The team maintaining the sourced `Github` repository is almighty at cluster scope

Not that isolated a _tenant_! 😕

That's why the **_⚙️OPS_** team enforces specific `ClusterRoles` with restricted permissions

Let's create these permissions!

---

## _namespace_ isolation for **_🎸ROCKY_**

.lab[

- Here are the restricted permissions to use in the `rocky-test` `Namespace`

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
  cp ~/container.training/k8s/M6-rocky-cluster-role.yaml ./tenants/base/rocky/
```

]

> 💡 Note that some resources are managed at cluster scope (like `PersistentVolumes`).
> We need specific permissions, then…

---

## Creating a `Github` source in Flux for the **_🎸ROCKY_** app repository

A specific _branch_ of the `Github` repository is monitored by the `Flux` source

.lab[

- ⚠️ you may change the **repository URL** to that of your own clone

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux create source git rocky-app \
  --namespace=rocky-test \
  --url=https://github.com/Musk8teers/container.training-spring-music/ \
  --branch=rocky --export > ./tenants/base/rocky/sync.yaml
```

]

---

## Creating a `kustomization` in Flux for the **_🎸ROCKY_** app repository

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux create kustomization rocky \
  --namespace=rocky-test \
  --service-account=rocky \
  --source=GitRepository/rocky-app \
  --path="./k8s/" --export >> ./tenants/base/rocky/sync.yaml

k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
  cd ./tenants/base/rocky/ && \
  kustomize create --autodetect && \
  cd -
```

]

---

class: extra-details

### 📂 Flux config files

Let's review our `Flux` configuration files

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
  cat ./tenants/base/rocky/sync.yaml && \
  cat ./tenants/base/rocky/kustomization.yaml
```

]

---

## Adding a kustomize patch for **_⚗️TEST_** cluster deployment

💡 Remember the DRY strategy!

- The `Flux` tenant-dedicated configuration is looking for this file: `./tenants/test/rocky/kustomization.yaml`
- It has been configured here: `clusters/CLOUDY/tenants.yaml`

- All the files we just created are located in `./tenants/base/rocky`
- So we have to create a specific kustomization in the right location

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
  mkdir -p ./tenants/test/rocky && \
  cp ~/container.training/k8s/M6-rocky-test-patch.yaml ./tenants/test/rocky/ && \
  cp ~/container.training/k8s/M6-rocky-test-kustomization.yaml ./tenants/test/rocky/kustomization.yaml
```

---

### Synchronizing Flux config with its Github repo

Locally, our `Flux` config repo is ready.
The **_⚙️OPS_** team has to push it to `Github` for the `Flux` controllers to watch and catch it!

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
  git add . && \
  git commit -m':wrench: :construction_worker: add ROCKY tenant configuration' && \
  git push
```

]

---

class: pic

![]()

---

class: pic

![]()

---

class: extra-details

### Flux resources for ROCKY tenant 1/2

.lab[

```bash
k8s@shpod:~$ flux get all -A
NAMESPACE    NAME                       REVISION            SUSPENDED
READY        MESSAGE
flux-system  gitrepository/flux-system  main@sha1:8ffd72cf  False
True         stored artifact for revision 'main@sha1:8ffd72cf'
rocky-test   gitrepository/rocky-app    rocky@sha1:ffe9f3fe False
True         stored artifact for revision 'rocky@sha1:ffe9f3fe'
(…)
```

]

---

class: extra-details

### Flux resources for ROCKY _tenant_ 2/2

.lab[

```bash
k8s@shpod:~$ flux get all -A
(…)
NAMESPACE    NAME                       REVISION            SUSPENDED
READY        MESSAGE
flux-system  kustomization/flux-system  main@sha1:8ffd72cf  False
True         Applied revision: main@sha1:8ffd72cf
flux-system  kustomization/tenant-prod                      False
False        kustomization path not found: stat /tmp/kustomization-1164119282/tenants/prod: no such file or directory
flux-system  kustomization/tenant-test  main@sha1:8ffd72cf  False
True         Applied revision: main@sha1:8ffd72cf
rocky-test   kustomization/rocky                            False
False        StatefulSet/db dry-run failed (Forbidden): statefulsets.apps "db" is forbidden: User "system:serviceaccount:rocky-test:rocky" cannot patch resource "statefulsets" in API group "apps" at the cluster scope
```

]

And here is our second batch of Flux errors! 😅

---

class: extra-details

### Flux Kustomization, mutability, …

🔍 Notice that none of the expected resources is created:
the whole kustomization is rejected, even though the `StatefulSet` is the only resource that fails!

🔍 A Flux Kustomization uses a dry-run to template the resources, and then applies patches onto them.
Fine, but some resources are not completely mutable, such as `StatefulSets`.

We have to apply the change without having to patch the resource.

🔍 Simply add `spec.targetNamespace: rocky-test` to the `Kustomization` named `rocky` (see the sketch below)
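
In YAML, the fixed `Kustomization` would look roughly like this (a hedged sketch mirroring the `flux create kustomization rocky` command from earlier; the `interval` value is an assumption):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: rocky
  namespace: rocky-test
spec:
  interval: 10m
  serviceAccountName: rocky
  sourceRef:
    kind: GitRepository
    name: rocky-app
  path: ./k8s/
  targetNamespace: rocky-test   # resources land here without being patched
  prune: true
```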

---

class: extra-details

## And then it's deployed 1/2

You should see the following resources in the `rocky-test` namespace

.lab[

```bash
k8s@shpod-578d64468-tp7r2 ~/$ k get pods,svc,deployments -n rocky-test
NAME                       READY   STATUS    RESTARTS      AGE
pod/db-0                   1/1     Running   0             47s
pod/web-6c677bf97f-c7pkv   0/1     Running   1 (22s ago)   47s
pod/web-6c677bf97f-p7b4r   0/1     Running   1 (19s ago)   47s

NAME          TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGE
service/db    ClusterIP   10.32.6.128   <none>        5432/TCP   48s
service/web   ClusterIP   10.32.2.202   <none>        80/TCP     48s

NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/web   0/2     2            0           47s
```

]

---

class: extra-details

## And then it's deployed 2/2

You should see the following resources in the `rocky-test` namespace

.lab[

```bash
k8s@shpod-578d64468-tp7r2 ~/$ k get statefulsets,pvc,pv -n rocky-test
NAME                  READY   AGE
statefulset.apps/db   1/1     47s

NAME                                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
persistentvolumeclaim/postgresql-data-db-0   Bound    pvc-c1963a2b-4fc9-4c74-9c5a-b0870b23e59a   1Gi        RWO            sbs-default    <unset>                 47s

NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                             STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
persistentvolume/postgresql-data                            1Gi        RWO,RWX        Retain           Available                                                    <unset>                          47s
persistentvolume/pvc-150fcef5-ebba-458e-951f-68a7e214c635   1G         RWO            Delete           Bound       shpod/shpod                       sbs-default    <unset>                          4h46m
persistentvolume/pvc-c1963a2b-4fc9-4c74-9c5a-b0870b23e59a   1Gi        RWO            Delete           Bound       rocky-test/postgresql-data-db-0   sbs-default    <unset>                          47s
```

]

---

class: extra-details

### PersistentVolumes are using a default `StorageClass`

💡 This managed cluster comes with custom `StorageClasses` leveraging Cloud _IaaS_ capabilities (i.e. block devices)

![]()

- a default `StorageClass` is applied if none is specified (like here)
- for **_🏭PROD_** purposes, the ops team might enforce a more performant `StorageClass`
- on a bare-metal cluster, the **_🏭PROD_** team has to configure and provide `StorageClasses` on its own

---

class: pic

![]()

---

## Upgrading ROCKY app

The Git source named `rocky-app` is pointing at
- a Github repository named [Musk8teers/container.training-spring-music](https://github.com/Musk8teers/container.training-spring-music/)
- on its branch named `rocky`

This branch deploys v1.0.0 of the _Web_ app:
`spec.template.spec.containers.image: ghcr.io/musk8teers/container.training-spring-music:1.0.0`

What happens if the **_🎸ROCKY_** team upgrades its branch to deploy `v1.0.1` of the _Web_ app?

---

## _tenant_ **_🏭PROD_**

💡 The **_🏭PROD_** _tenant_ is still waiting for its `Flux` configuration, but don't worry about it right now.

---

### 🗺️ Where are we in our scenario?

<pre class="mermaid">
%%{init:
  {
    "theme": "default",
    "gitGraph": {
      "mainBranchName": "OPS",
      "mainBranchOrder": 0
    }
  }
}%%
gitGraph
  commit id:"0" tag:"start"
  branch ROCKY order:3
  branch MOVY order:4
  branch YouRHere order:5

  checkout OPS
  commit id:'Flux install on CLOUDY cluster' tag:'T01'
  branch TEST-env order:1
  commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT

  checkout OPS
  commit id:'Flux config. for TEST tenant' tag:'T03'
  commit id:'namespace isolation by RBAC'
  checkout TEST-env
  merge OPS id:'ROCKY tenant creation' tag:'T04'

  checkout OPS
  commit id:'ROCKY deploy. config.' tag:'R01'

  checkout TEST-env
  merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'

  checkout ROCKY
  commit id:'ROCKY' tag:'v1.0.0'

  checkout TEST-env
  merge ROCKY tag:'ROCKY v1.0.0'

  checkout YouRHere
  commit id:'x'
  checkout OPS
  merge YouRHere id:'YOU ARE HERE'

  checkout OPS
  commit id:'Ingress-controller config.' tag:'T05'
  checkout TEST-env
  merge OPS id:'Ingress-controller install' type: HIGHLIGHT tag:'T06'

  checkout OPS
  commit id:'ROCKY patch for ingress config.' tag:'R03'
  checkout TEST-env
  merge OPS id:'ingress config. for ROCKY app'

  checkout ROCKY
  commit id:'blue color' tag:'v1.0.1'
  checkout TEST-env
  merge ROCKY tag:'ROCKY v1.0.1'

  checkout ROCKY
  commit id:'pink color' tag:'v1.0.2'
  checkout TEST-env
  merge ROCKY tag:'ROCKY v1.0.2'

  checkout OPS
  commit id:'FLUX config for MOVY deployment' tag:'M01'
  checkout TEST-env
  merge OPS id:'FLUX ready to deploy MOVY' type: HIGHLIGHT tag:'M02'

  checkout MOVY
  commit id:'MOVY' tag:'v1.0.3'
  checkout TEST-env
  merge MOVY tag:'MOVY v1.0.3' type: REVERSE

  checkout OPS
  commit id:'Network policies'
  checkout TEST-env
  merge OPS type: HIGHLIGHT
</pre>
@@ -1,354 +0,0 @@
|
||||
# Kubernetes in production — <br/>an end-to-end example
|
||||
|
||||
- Previous training modules focused on individual topics
|
||||
|
||||
(e.g. RBAC, network policies, CRDs, Helm...)
|
||||
|
||||
- We will now show how to put everything together to deploy apps in production
|
||||
|
||||
(dealing with typical challenges like: multiple apps, multiple teams, multiple clusters...)
|
||||
|
||||
- Our first challenge will be to pick and choose which components to use
|
||||
|
||||
(among the vast [Cloud Native Landscape](https://landscape.cncf.io/))
|
||||
|
||||
- We'll start with a basic Kubernetes cluster (on cloud or on premises)
|
||||
|
||||
- We'll then enhance it by adding features one at a time
|
||||
|
||||
---
|
||||
|
||||
## The cast
|
||||
|
||||
There are 3 teams in our company:
|
||||
|
||||
- **_⚙️OPS_** is the platform engineering team
|
||||
|
||||
- they're responsible for building and configuring Kubernetes clusters
|
||||
|
||||
- the **_🎸ROCKY_** team develops and manages the **_🎸ROCKY_** app
|
||||
|
||||
- that app manages a collection of _rock & pop_ albums
|
||||
|
||||
- it's deployed with plain YAML manifests
|
||||
|
||||
- the **_🎬MOVY_** team develops and manages the **_🎬MOVY_** app
|
||||
|
||||
- that app manages a collection of _movie soundtrack_ albums
|
||||
|
||||
- it's deployed with Helm charts
|
||||
|
||||
---
|
||||
|
||||
## Code and team organization
|
||||
|
||||
- **_🎸ROCKY_** and **_🎬MOVY_** reside in separate git repositories
|
||||
|
||||
- Each team can write code, build packages, and deploy their applications:
|
||||
|
||||
- independently
|
||||
<br/>(= without having to worry about what's happening in the other repo)
|
||||
|
||||
- autonomously
|
||||
<br/>(= without having to synchronize or obtain privileges from another team)
|
||||
|
||||
---
|
||||
|
||||
## Cluster organization
|
||||
|
||||
The **_⚙️OPS_** team manages 2 Kubernetes clusters:
|
||||
|
||||
- **_☁️CLOUDY_**: managed cluster from a public cloud provider
|
||||
|
||||
- **_🤘METAL_**: custom-built cluster installed on bare Linux servers
|
||||
|
||||
Let's see the differences between these clusters.
|
||||
|
||||
---
|
||||
|
||||
## **_☁️CLOUDY_** cluster
|
||||
|
||||
- Managed cluster from a public cloud provider ("Kubernetes-as-a-Service")
|
||||
|
||||
- HA control plane deployed and managed by the cloud provider
|
||||
|
||||
- Two worker nodes (potentially with cluster autoscaling)
|
||||
|
||||
- Usually comes pre-installed with some basic features
|
||||
|
||||
(e.g. metrics-server, CNI, CSI, sometimes an ingress controller)
|
||||
|
||||
- Requires extra components to be production-ready
|
||||
|
||||
(e.g. Flux or other gitops pipeline, observability...)
|
||||
|
||||
- Example: [Scaleway Kapsule][kapsule] (but many other KaaS options are available)
|
||||
|
||||
[kapsule]: https://www.scaleway.com/en/kubernetes-kapsule/
|
||||
|
||||
---
|
||||
|
||||
## **_🤘METAL_** cluster
|
||||
|
||||
- Custom-built cluster installed on bare Linux servers
|
||||
|
||||
- HA control plane deployed and managed by the **_⚙️OPS_** team
|
||||
|
||||
- 3 nodes
|
||||
|
||||
- in our example, the nodes will run both the control plane and our apps
|
||||
|
||||
- it is more typical to use dedicated control plane nodes
|
||||
<br/>(example: 3 control plane nodes + at least 3 worker nodes)
|
||||
|
||||
- Comes with even fewer pre-installed components than **_☁️CLOUDY_**
|
||||
|
||||
(requiring more work from our **_⚙️OPS_** team)
|
||||
|
||||
- Example: we'll use [k0s] (but many other distros are available)
|
||||
|
||||
[k0s]: https://k0sproject.io/
|
||||
|
||||
---
|
||||
|
||||
## **_⚗️TEST_** and **_🏭PROD_**
|
||||
|
||||
- The **_⚙️OPS_** team creates 2 environments for each dev team
|
||||
|
||||
(**_⚗️TEST_** and **_🏭PROD_**)
|
||||
|
||||
- These environments exist on both clusters
|
||||
|
||||
(meaning 2 apps × 2 clusters × 2 envs = 8 envs total)
|
||||
|
||||
- The setup for each env and cluster should follow DRY principles
|
||||
|
||||
(to ensure configurations are consistent and minimize maintenance)
|
||||
|
||||
- Each cluster and each env has its own lifecycle
|
||||
|
||||
(= it should be possible to deploy, or add an extra component/feature...
|
||||
<br/>on one env without impacting the other)
|
||||
|
||||
---
|
||||
|
||||
### Multi-tenancy
|
||||
|
||||
Both **_🎸ROCKY_** and **_🎬MOVY_** teams should use **dedicated _"tenants"_** on each cluster/env
|
||||
|
||||
- the **_🎸ROCKY_** team should be able to deploy, upgrade and configure its app within its dedicated **namespace** without anybody else involved
|
||||
|
||||
- and the same for **_🎬MOVY_**
|
||||
|
||||
- neither team's deployments may interfere with the other's, maintaining a clean and conflict-free environment
|
||||
|
||||
---
|
||||
|
||||
## Application overview
|
||||
|
||||
- Both dev teams are working on an app to manage music albums
|
||||
|
||||
- This app is mostly based on a `Spring` framework demo called spring-music
|
||||
|
||||
- This lab uses a dedicated fork [container.training-spring-music](https://github.com/Musk8teers/container.training-spring-music):
|
||||
- with 2 branches dedicated to the **_🎸ROCKY_** and **_🎬MOVY_** teams
|
||||
|
||||
- The app architecture consists of 2 tiers:
|
||||
- a `Java/Spring` Web app
|
||||
- a `PostgreSQL` database
|
||||
|
||||
---
|
||||
|
||||
### 📂 specific file: application.yaml
|
||||
|
||||
This is where we configure the application to connect to the `PostgreSQL` database.
|
||||
|
||||
.lab[
|
||||
|
||||
🔍 Location: [/src/main/resources/application.yml](https://github.com/Musk8teers/container.training-spring-music/blob/main/src/main/resources/application.yml)
|
||||
|
||||
]
|
||||
|
||||
`PROFILE=postgres` env var is set in [docker-compose.yaml](https://github.com/Musk8teers/container.training-spring-music/blob/main/docker-compose.yml) file, for example…
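For illustration, the `postgres` part of that configuration might look roughly like this (keys and defaults below are assumptions; check the linked file for the real values):

```yaml
spring:
  config:
    activate:
      on-profile: postgres        # activated by the PROFILE=postgres env var
  datasource:
    url: jdbc:postgresql://${DB_HOST:db}:5432/music   # hypothetical host/db names
    username: ${DB_USERNAME:postgres}
    password: ${DB_PASSWORD:postgres}
```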
|
||||
|
||||
---
|
||||
|
||||
### 📂 specific file: AlbumRepositoryPopulator.java
|
||||
|
||||
|
||||
This is where the album collection is initially loaded from the file [`album.json`](https://github.com/Musk8teers/container.training-spring-music/blob/main/src/main/resources/albums.json)
|
||||
|
||||
.lab[
|
||||
|
||||
🔍 Location: [`/src/main/java/org/cloudfoundry/samples/music/repositories/AlbumRepositoryPopulator.java`](https://github.com/Musk8teers/container.training-spring-music/blob/main/src/main/java/org/cloudfoundry/samples/music/repositories/AlbumRepositoryPopulator.java)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## 🚚 How to deploy?
|
||||
|
||||
The **_⚙️OPS_** team offers 2 deployment strategies that dev teams can use autonomously:
|
||||
|
||||
- **_🎸ROCKY_** uses a `Flux` _GitOps_ workflow based on regular Kubernetes `YAML` resources
|
||||
|
||||
- **_🎬MOVY_** uses a `Flux` _GitOps_ workflow based on `Helm` charts
|
||||
|
||||
---
|
||||
|
||||
## 🍱 What features?
|
||||
|
||||
<!-- TODO: complete this slide when all the modules are there -->
|
||||
The **_⚙️OPS_** team aims to provide clusters offering the following features to its users:
|
||||
|
||||
- a network stack with efficient workload isolation
|
||||
|
||||
- ingress and load-balancing capabilities
|
||||
|
||||
- an enterprise-grade monitoring solution for real-time insights
|
||||
|
||||
- automated policy rule enforcement to control Kubernetes resources requested by dev teams
|
||||
|
||||
<!-- - HA PostgreSQL -->
|
||||
|
||||
<!-- - HTTPs certificates to expose the applications -->
|
||||
|
||||
---
|
||||
|
||||
## 🌰 In a nutshell
|
||||
|
||||
- 3 teams: **_⚙️OPS_**, **_🎸ROCKY_**, **_🎬MOVY_**
|
||||
|
||||
- 2 clusters: **_☁️CLOUDY_**, **_🤘METAL_**
|
||||
|
||||
- 2 envs per cluster and per dev team: **_⚗️TEST_**, **_🏭PROD_**
|
||||
|
||||
- 2 web apps (Java/Spring + PostgreSQL): one for pop and rock albums, another for movie soundtrack albums
|
||||
|
||||
- 2 deployment strategies: regular `YAML` resources + `Kustomize`, `Helm` charts
|
||||
|
||||
|
||||
> 💻 `Flux` is used both
|
||||
> - to operate the clusters
|
||||
> - and to manage the _GitOps_ deployment workflows
|
||||
|
||||
---
|
||||
|
||||
### What our scenario might look like…
|
||||
|
||||
<pre class="mermaid">
|
||||
%%{init:
|
||||
{
|
||||
"theme": "default",
|
||||
"gitGraph": {
|
||||
"mainBranchName": "OPS",
|
||||
"mainBranchOrder": 0
|
||||
}
|
||||
}
|
||||
}%%
|
||||
gitGraph
|
||||
commit id:"0" tag:"start"
|
||||
branch ROCKY order:4
|
||||
branch MOVY order:5
|
||||
branch YouRHere order:6
|
||||
|
||||
checkout YouRHere
|
||||
commit id:'x'
|
||||
checkout OPS
|
||||
merge YouRHere id:'YOU ARE HERE'
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux install on CLOUDY cluster' tag:'T01'
|
||||
branch TEST-env order:1
|
||||
commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux config. for TEST tenant' tag:'T03'
|
||||
commit id:'namespace isolation by RBAC'
|
||||
checkout TEST-env
|
||||
merge OPS id:'ROCKY tenant creation' tag:'T04'
|
||||
|
||||
checkout OPS
|
||||
commit id:'ROCKY deploy. config.' tag:'R01'
|
||||
|
||||
checkout TEST-env
|
||||
merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'ROCKY' tag:'v1.0.0'
|
||||
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.0'
|
||||
|
||||
checkout OPS
|
||||
commit id:'Ingress-controller config.' tag:'T05'
|
||||
checkout TEST-env
|
||||
merge OPS id:'Ingress-controller install' type: HIGHLIGHT tag:'T06'
|
||||
|
||||
checkout OPS
|
||||
commit id:'ROCKY patch for ingress config.' tag:'R03'
|
||||
checkout TEST-env
|
||||
merge OPS id:'ingress config. for ROCKY app'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'blue color' tag:'v1.0.1'
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.1'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'pink color' tag:'v1.0.2'
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.2'
|
||||
|
||||
checkout OPS
|
||||
commit id:'FLUX config for MOVY deployment' tag:'M01'
|
||||
checkout TEST-env
|
||||
merge OPS id:'FLUX ready to deploy MOVY' type: HIGHLIGHT tag:'M02'
|
||||
|
||||
checkout MOVY
|
||||
commit id:'MOVY' tag:'v1.0.3'
|
||||
checkout TEST-env
|
||||
merge MOVY tag:'MOVY v1.0.3' type: REVERSE
|
||||
|
||||
checkout OPS
|
||||
commit id:'Network policies'
|
||||
checkout TEST-env
|
||||
merge OPS type: HIGHLIGHT tag:'T07'
|
||||
|
||||
checkout OPS
|
||||
commit id:'k0s install on METAL cluster' tag:'K01'
|
||||
commit id:'Flux config. for METAL cluster' tag:'K02'
|
||||
branch METAL_TEST-PROD order:3
|
||||
commit id:'ROCKY/MOVY tenants on METAL' type: HIGHLIGHT
|
||||
checkout OPS
|
||||
commit id:'Flux config. for OpenEBS' tag:'K03'
|
||||
checkout METAL_TEST-PROD
|
||||
merge OPS id:'openEBS on METAL' type: HIGHLIGHT
|
||||
|
||||
checkout OPS
|
||||
commit id:'Prometheus install'
|
||||
checkout TEST-env
|
||||
merge OPS type: HIGHLIGHT
|
||||
|
||||
checkout OPS
|
||||
commit id:'Kyverno install'
|
||||
commit id:'Kyverno rules'
|
||||
checkout TEST-env
|
||||
merge OPS type: HIGHLIGHT
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux config. for PROD tenant' tag:'P01'
|
||||
branch PROD-env order:2
|
||||
commit id:'ROCKY tenant on PROD'
|
||||
checkout OPS
|
||||
commit id:'ROCKY patch for PROD' tag:'R04'
|
||||
checkout PROD-env
|
||||
merge OPS id:'PROD ready to deploy ROCKY' type: HIGHLIGHT
|
||||
checkout PROD-env
|
||||
merge ROCKY tag:'ROCKY v1.0.2'
|
||||
|
||||
checkout MOVY
|
||||
commit id:'MOVY HELM chart' tag:'M03'
|
||||
checkout TEST-env
|
||||
merge MOVY tag:'MOVY v1.0'
|
||||
</pre>
|
||||
@@ -1,410 +0,0 @@
|
||||
# T02- Creating **_⚗️TEST_** env on our **_☁️CLOUDY_** cluster
|
||||
|
||||
Let's take a look at our **_☁️CLOUDY_** cluster!
|
||||
|
||||
**_☁️CLOUDY_** is a Kubernetes cluster created with the [Scaleway Kapsule](https://www.scaleway.com/en/kubernetes-kapsule/) managed service
|
||||
|
||||
This managed cluster comes preinstalled with specific features:
|
||||
- Kubernetes dashboard
|
||||
- specific _Storage Classes_ based on Scaleway _IaaS_ block storage offerings
|
||||
- a `Cilium` _CNI_ stack already set up
|
||||
|
||||
---
|
||||
|
||||
## Accessing the managed Kubernetes cluster
|
||||
|
||||
To access our cluster, we'll connect via [`shpod`](https://github.com/jpetazzo/shpod)
|
||||
|
||||
.lab[
|
||||
|
||||
- If you already have `kubectl` on your desktop computer
|
||||
```bash
|
||||
kubectl -n shpod run shpod --image=jpetazzo/shpod
|
||||
kubectl -n shpod exec -it shpod -- bash
|
||||
```
|
||||
- or directly via ssh
|
||||
```bash
|
||||
ssh -p myPort k8s@mySHPODSvcIpAddress
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Flux installation
|
||||
|
||||
Once `Flux` is installed,
|
||||
the **_⚙️OPS_** team exclusively operates its clusters by updating a code base in a `Github` repository
|
||||
|
||||
_GitOps_ and `Flux` enable the **_⚙️OPS_** team to rely on a pattern that is a first-class citizen in the Kubernetes world:
|
||||
|
||||
- describe the **desired target state**
|
||||
- and let the **automated convergence** happen
|
||||
|
||||
---
|
||||
|
||||
### Checking prerequisites
|
||||
|
||||
The `Flux` _CLI_ is available in our `shpod` pod
|
||||
|
||||
Before installation, we need to check that:
|
||||
- `Flux` _CLI_ is correctly installed
|
||||
- it can connect to the `API server`
|
||||
- our versions of `Flux` and Kubernetes are compatible
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ flux --version
|
||||
flux version 2.5.1
|
||||
|
||||
k8s@shpod:~$ flux check --pre
|
||||
► checking prerequisites
|
||||
✔ Kubernetes 1.32.3 >=1.30.0-0
|
||||
✔ prerequisites checks passed
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Git repository for Flux configuration
|
||||
|
||||
The **_⚙️OPS_** team uses `Flux` _CLI_
|
||||
- to create a `git` repository named `fleet-config-using-flux-XXXXX` (⚠ replace `XXXXX` with a personal suffix)
|
||||
- in our `Github` organization named `container-training-fleet`
|
||||
|
||||
Prerequisites are:
|
||||
- `Flux` _CLI_ needs a `Github` personal access token (_PAT_)
|
||||
- to create and/or access the `Github` repository
|
||||
- to give permissions to existing teams in our `Github` organization
|
||||
- The PAT needs _CRUD_ permissions on our `Github` organization
|
||||
- repositories
|
||||
- admin:public_key
|
||||
- users
|
||||
|
||||
- As the **_⚙️OPS_** team, let's create a `Github` personal access token…
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Creating dedicated `Github` repo to host Flux config
|
||||
|
||||
.lab[
|
||||
|
||||
- let's replace the `GITHUB_TOKEN` value with our _Personal Access Token_
|
||||
- and the `GITHUB_REPO` value with our specific repository name
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ export GITHUB_TOKEN="my-token" && \
|
||||
export GITHUB_USER="container-training-fleet" && \
|
||||
export GITHUB_REPO="fleet-config-using-flux-XXXXX"
|
||||
|
||||
k8s@shpod:~$ flux bootstrap github \
|
||||
--owner=${GITHUB_USER} \
|
||||
--repository=${GITHUB_REPO} \
|
||||
--team=OPS \
|
||||
--team=ROCKY --team=MOVY \
|
||||
--path=clusters/CLOUDY
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
Here is the result
|
||||
|
||||
```bash
|
||||
✔ repository "https://github.com/container-training-fleet/fleet-config-using-flux-XXXXX" created
|
||||
► reconciling repository permissions
|
||||
✔ granted "maintain" permissions to "OPS"
|
||||
✔ granted "maintain" permissions to "ROCKY"
|
||||
✔ granted "maintain" permissions to "MOVY"
|
||||
► reconciling repository permissions
|
||||
✔ reconciled repository permissions
|
||||
► cloning branch "main" from Git repository "https://github.com/container-training-fleet/fleet-config-using-flux-XXXXX.git"
|
||||
✔ cloned repository
|
||||
► generating component manifests
|
||||
✔ generated component manifests
|
||||
✔ committed component manifests to "main" ("7c97bdeb5b932040fd8d8a65fe1dc84c66664cbf")
|
||||
► pushing component manifests to "https://github.com/container-training-fleet/fleet-config-using-flux-XXXXX.git"
|
||||
✔ component manifests are up to date
|
||||
► installing components in "flux-system" namespace
|
||||
✔ installed components
|
||||
✔ reconciled components
|
||||
► determining if source secret "flux-system/flux-system" exists
|
||||
► generating source secret
|
||||
✔ public key: ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBFqaT8B8SezU92qoE+bhnv9xONv9oIGuy7yVAznAZfyoWWEVkgP2dYDye5lMbgl6MorG/yjfkyo75ETieAE49/m9D2xvL4esnSx9zsOLdnfS9W99XSfFpC2n6soL+Exodw==
|
||||
✔ configured deploy key "flux-system-main-flux-system-./clusters/CLOUDY" for "https://github.com/container-training-fleet/fleet-config-using-flux-XXXXX"
|
||||
► applying source secret "flux-system/flux-system"
|
||||
✔ reconciled source secret
|
||||
► generating sync manifests
|
||||
✔ generated sync manifests
|
||||
✔ committed sync manifests to "main" ("11035e19cabd9fd2c7c94f6e93707f22d69a5ff2")
|
||||
► pushing sync manifests to "https://github.com/container-training-fleet/fleet-config-using-flux-XXXXX.git"
|
||||
► applying sync manifests
|
||||
✔ reconciled sync configuration
|
||||
◎ waiting for GitRepository "flux-system/flux-system" to be reconciled
|
||||
✔ GitRepository reconciled successfully
|
||||
◎ waiting for Kustomization "flux-system/flux-system" to be reconciled
|
||||
✔ Kustomization reconciled successfully
|
||||
► confirming components are healthy
|
||||
✔ helm-controller: deployment ready
|
||||
✔ kustomize-controller: deployment ready
|
||||
✔ notification-controller: deployment ready
|
||||
✔ source-controller: deployment ready
|
||||
✔ all components are healthy
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Flux configures Github repository access for teams
|
||||
|
||||
- `Flux` sets up permissions that allow teams within our organization to **access** the `Github` repository as maintainers
|
||||
- Teams need to exist before `Flux` proceeds with this configuration
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### ⚠️ Disclaimer
|
||||
|
||||
- In this lab, adding these teams as maintainers was merely a demonstration of how `Flux` _CLI_ sets up permissions in Github
|
||||
|
||||
- But there is no need for dev teams to have access to this `Github` repository
|
||||
|
||||
- One advantage of _GitOps_ lies in its ability to easily set up 💪🏼 **Separation of concerns** by using multiple `Flux` sources
|
||||
|
||||
---
|
||||
|
||||
### 📂 Flux config files
|
||||
|
||||
`Flux` has been successfully installed onto our **_☁️CLOUDY_** Kubernetes cluster!
|
||||
|
||||
Its configuration is managed through a _Gitops_ workflow sourced directly from our `Github` repository
|
||||
|
||||
Let's review the `Flux` configuration files we've created and pushed into the `Github` repository…
|
||||
… as well as the corresponding components running in our Kubernetes cluster
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
<!-- FIXME: wrong schema -->
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Flux resources 1/2
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ kubectl get all --namespace flux-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
pod/helm-controller-b6767d66-h6qhk 1/1 Running 0 5m
|
||||
pod/kustomize-controller-57c7ff5596-94rnd 1/1 Running 0 5m
|
||||
pod/notification-controller-58ffd586f7-zxfvk 1/1 Running 0 5m
|
||||
pod/source-controller-6ff87cb475-g6gn6 1/1 Running 0 5m
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
service/notification-controller ClusterIP 10.104.139.156 <none> 80/TCP 5m1s
|
||||
service/source-controller ClusterIP 10.106.120.137 <none> 80/TCP 5m
|
||||
service/webhook-receiver ClusterIP 10.96.28.236 <none> 80/TCP 5m
|
||||
(…)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Flux resources 2/2
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ kubectl get all --namespace flux-system
|
||||
(…)
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/helm-controller 1/1 1 1 5m
|
||||
deployment.apps/kustomize-controller 1/1 1 1 5m
|
||||
deployment.apps/notification-controller 1/1 1 1 5m
|
||||
deployment.apps/source-controller 1/1 1 1 5m
|
||||
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
replicaset.apps/helm-controller-b6767d66 1 1 1 5m
|
||||
replicaset.apps/kustomize-controller-57c7ff5596 1 1 1 5m
|
||||
replicaset.apps/notification-controller-58ffd586f7 1 1 1 5m
|
||||
replicaset.apps/source-controller-6ff87cb475 1 1 1 5m
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Flux components
|
||||
|
||||
- the `source controller` monitors `Git` repositories and fetches their contents, so that the other controllers can apply the resulting Kubernetes resources on the cluster
|
||||
|
||||
- the `Helm controller` checks for new `Helm` _chart_ releases in `Helm` repositories and installs updates as needed
|
||||
|
||||
- _CRDs_ store `Flux` configuration within the Kubernetes control plane
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Flux resources that have been created
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ flux get all --all-namespaces
|
||||
NAMESPACE NAME REVISION SUSPENDED
|
||||
READY MESSAGE
|
||||
flux-system gitrepository/flux-system main@sha1:d48291a8 False
|
||||
True stored artifact for revision 'main@sha1:d48291a8'
|
||||
|
||||
NAMESPACE NAME REVISION SUSPENDED
|
||||
READY MESSAGE
|
||||
flux-system kustomization/flux-system main@sha1:d48291a8 False
|
||||
True Applied revision: main@sha1:d48291a8
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Flux CLI
|
||||
|
||||
`Flux` Command-Line Interface fulfills 3 primary functions:
|
||||
|
||||
1. It installs and configures the initial mandatory `Flux` resources in a _Gitops_ `git` repository
|
||||
- ensuring proper access and permissions
|
||||
|
||||
2. It locally generates `YAML` files for desired `Flux` resources so that we just need to `git push` them
|
||||
- _tenants_
|
||||
- sources
|
||||
- …
|
||||
|
||||
3. It requests the API server to manage `Flux`-related resources
|
||||
- _operators_
|
||||
- _CRDs_
|
||||
- logs
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Flux -- for more info
|
||||
|
||||
Please, refer to the [`Flux` chapter in the High Five M3 module](./3.yml.html#toc-helm-chart-format)
|
||||
|
||||
---
|
||||
|
||||
### Flux relies on Kustomize
|
||||
|
||||
The `Flux` component named `kustomize controller` looks for `Kustomize` resources in `Flux` code-based sources
|
||||
|
||||
1. `Kustomize` looks for `YAML` manifests listed in the `kustomization.yaml` file
|
||||
|
||||
2. and aggregates, hydrates and patches them following the `kustomization` configuration
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### 2 different kustomization resources
|
||||
|
||||
⚠️ `Flux` uses 2 distinct resource types with `kind: Kustomization`
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
```
|
||||
|
||||
describes how Kustomize (the _CLI_ tool) aggregates and transforms `YAML` manifests into a single set of `YAML`-described resources
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||
kind: Kustomization
|
||||
```
|
||||
|
||||
describes where `Flux kustomize-controller` looks for a `kustomization.yaml` file in a given `Flux` code-based source
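For instance, a minimal `kustomization.yaml` of the first flavor simply lists the manifests to aggregate (file names below are placeholders):

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
```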
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Kustomize -- for more info
|
||||
|
||||
Please, refer to the [`Kustomize` chapter in the High Five M3 module](./3.yml.html#toc-kustomize)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Group / Version / Kind -- for more info
|
||||
|
||||
For more info about how Kubernetes resource types are identified by their `Group / Version / Kind` triplet…
|
||||
… please, refer to the [`Kubernetes API` chapter in the High Five M5 module](./5.yml.html#toc-the-kubernetes-api)
|
||||
|
||||
---
|
||||
|
||||
### 🗺️ Where are we in our scenario?
|
||||
|
||||
<pre class="mermaid">
|
||||
%%{init:
|
||||
{
|
||||
"theme": "default",
|
||||
"gitGraph": {
|
||||
"mainBranchName": "OPS",
|
||||
"mainBranchOrder": 0
|
||||
}
|
||||
}
|
||||
}%%
|
||||
gitGraph
|
||||
commit id:"0" tag:"start"
|
||||
branch ROCKY order:3
|
||||
branch MOVY order:4
|
||||
branch YouRHere order:5
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux install on CLOUDY cluster' tag:'T01'
|
||||
branch TEST-env order:1
|
||||
commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT
|
||||
|
||||
checkout YouRHere
|
||||
commit id:'x'
|
||||
checkout OPS
|
||||
merge YouRHere id:'YOU ARE HERE'
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux config. for TEST tenant' tag:'T03'
|
||||
commit id:'namespace isolation by RBAC'
|
||||
checkout TEST-env
|
||||
merge OPS id:'ROCKY tenant creation' tag:'T04'
|
||||
|
||||
checkout OPS
|
||||
commit id:'ROCKY deploy. config.' tag:'R01'
|
||||
|
||||
checkout TEST-env
|
||||
merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'ROCKY' tag:'v1.0.0'
|
||||
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.0'
|
||||
</pre>
|
||||
@@ -1,200 +0,0 @@
|
||||
# Multi-tenants management with Flux
|
||||
|
||||
💡 Thanks to `Flux`, we can manage Kubernetes resources from inside the clusters.
|
||||
|
||||
The **_⚙️OPS_** team uses `Flux` with a _GitOps_ code base to:
|
||||
- configure the clusters
|
||||
- deploy tools and components to extend the clusters' capabilities
|
||||
- configure _GitOps_ workflow for dev teams in **dedicated and isolated _tenants_**
|
||||
|
||||
The **_🎸ROCKY_** team uses `Flux` to deploy every new release of its app, by detecting new `git push` events happening in its app `Github` repository
|
||||
|
||||
|
||||
The **_🎬MOVY_** team uses `Flux` to deploy every new release of its app, packaged and published in a new `Helm` chart release
|
||||
|
||||
---
|
||||
|
||||
## Creating _tenants_ with Flux
|
||||
|
||||
While basic `Flux` behavior is to use a single configuration directory applied by a cluster-wide role…
|
||||
|
||||
… it can also enable _multi-tenant_ configuration by:
|
||||
- creating dedicated directories for each _tenant_ in its configuration code base
|
||||
- and using a dedicated `ServiceAccount` with limited permissions to operate in each _tenant_
|
||||
|
||||
Several _tenants_ are created
|
||||
- per env
|
||||
- for **_⚗️TEST_**
|
||||
- and **_🏭PROD_**
|
||||
- per team
|
||||
- for **_🎸ROCKY_**
|
||||
- and **_🎬MOVY_**
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Flux CLI works locally
|
||||
|
||||
First, we have to **locally** clone our `Flux` configuration `Github` repository (a sketch of these steps follows):
|
||||
|
||||
- create an ssh key pair
|
||||
- add the **public** key to your `Github` repository (**with write access**)
|
||||
- and git clone the repository
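
Here is a sketch of those three steps (the key path and clone URL are placeholders to adapt):

```bash
k8s@shpod:~$ ssh-keygen -t ed25519 -f ~/.ssh/flux-config
# …add ~/.ssh/flux-config.pub as a deploy key (with write access) on the Github repo…
k8s@shpod:~$ git clone git@github.com:container-training-fleet/fleet-config-using-flux-XXXXX.git
```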
|
||||
|
||||
---
|
||||
|
||||
### The command line 1/2
|
||||
|
||||
Creating the **_⚗️TEST_** _tenant_
|
||||
|
||||
.lab[
|
||||
|
||||
- ⚠️ Remember to rename the repo with your own suffix
|
||||
```bash
|
||||
k8s@shpod:~$ cd fleet-config-using-flux-XXXXX/
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
flux create kustomization tenant-test \
|
||||
--namespace=flux-system \
|
||||
--source=GitRepository/flux-system \
|
||||
--path ./tenants/test \
|
||||
--interval=1m \
|
||||
--prune --export >> clusters/CLOUDY/tenants.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### The command line 2/2
|
||||
|
||||
Then we create the **_🏭PROD_** _tenant_
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
flux create kustomization tenant-prod \
|
||||
--namespace=flux-system \
|
||||
--source=GitRepository/flux-system \
|
||||
--path ./tenants/prod \
|
||||
--interval=3m \
|
||||
--prune --export >> clusters/CLOUDY/tenants.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### 📂 Flux tenants.yaml files
|
||||
|
||||
Let's review the `fleet-config-using-flux-XXXXX/clusters/CLOUDY/tenants.yaml` file
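
It should contain two `Kustomization` resources exported by the commands above, roughly like this (exact defaults may differ; the `tenant-prod` resource is similar, with `interval: 3m0s` and `path: ./tenants/prod`):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: tenant-test
  namespace: flux-system
spec:
  interval: 1m0s
  path: ./tenants/test
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
```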
|
||||
|
||||
|
||||
|
||||
|
||||
⚠️ The `flux create` commands we typed generate the `YAML` manifests **locally**
|
||||
|
||||
> ☝🏻 Don't forget to `git commit` and `git push` to `Github`!
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Our 1st Flux error
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux get all
|
||||
NAMESPACE NAME REVISION SUSPENDED
|
||||
READY MESSAGE
|
||||
flux-system gitrepository/flux-system main@sha1:0466652e False
|
||||
True stored artifact for revision 'main@sha1:0466652e'
|
||||
|
||||
NAMESPACE NAME REVISION SUSPENDED
|
||||
READY MESSAGE
|
||||
kustomization/flux-system main@sha1:0466652e False True
|
||||
Applied revision: main@sha1:0466652e
|
||||
kustomization/tenant-prod False False
|
||||
kustomization path not found: stat /tmp/kustomization-417981261/tenants/prod: no such file or directory
|
||||
kustomization/tenant-test False False
|
||||
kustomization path not found: stat /tmp/kustomization-2532810750/tenants/test: no such file or directory
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
> Our configuration may be incomplete 😅
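
One quick way to make these `Kustomizations` reconcile while the real tenant config is being prepared, sketched below (the `.keep` placeholder files are hypothetical; the next slides fill these directories properly):

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ mkdir -p tenants/test tenants/prod
k8s@shpod:~/fleet-config-using-flux-XXXXX$ touch tenants/test/.keep tenants/prod/.keep
k8s@shpod:~/fleet-config-using-flux-XXXXX$ git add tenants && \
  git commit -m 'create tenant paths' && git push
```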
|
||||
|
||||
---
|
||||
|
||||
## Configuring Flux for the **_🎸ROCKY_** team
|
||||
|
||||
What the **_⚙️OPS_** team has to do:
|
||||
|
||||
- 🔧 Create a dedicated `rocky` _tenant_ for **_⚗️TEST_** and **_🏭PROD_** envs on the cluster
|
||||
|
||||
- 🔧 Create the `Flux` source pointing to the `Github` repository embedding the **_🎸ROCKY_** app source code
|
||||
|
||||
- 🔧 Add a `kustomize` _patch_ to the global `Flux` config to include the specific `Flux` config dedicated to deploying the **_🎸ROCKY_** app
|
||||
|
||||
What the **_🎸ROCKY_** team has to do:
|
||||
|
||||
- 👨💻 Create the `kustomization.yaml` file in the **_🎸ROCKY_** app source code repository on `Github`
|
||||
|
||||
---
|
||||
|
||||
### 🗺️ Where are we in our scenario?
|
||||
|
||||
<pre class="mermaid">
|
||||
%%{init:
|
||||
{
|
||||
"theme": "default",
|
||||
"gitGraph": {
|
||||
"mainBranchName": "OPS",
|
||||
"mainBranchOrder": 0
|
||||
}
|
||||
}
|
||||
}%%
|
||||
gitGraph
|
||||
commit id:"0" tag:"start"
|
||||
branch ROCKY order:3
|
||||
branch MOVY order:4
|
||||
branch YouRHere order:5
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux install on CLOUDY cluster' tag:'T01'
|
||||
branch TEST-env order:1
|
||||
commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux config. for TEST tenant' tag:'T03'
|
||||
commit id:'namespace isolation by RBAC'
|
||||
checkout TEST-env
|
||||
merge OPS id:'ROCKY tenant creation' tag:'T04'
|
||||
|
||||
checkout YouRHere
|
||||
commit id:'x'
|
||||
checkout OPS
|
||||
merge YouRHere id:'YOU ARE HERE'
|
||||
|
||||
checkout OPS
|
||||
commit id:'ROCKY deploy. config.' tag:'R01'
|
||||
|
||||
checkout TEST-env
|
||||
merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'ROCKY' tag:'v1.0.0'
|
||||
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.0'
|
||||
</pre>
|
||||
@@ -1,284 +0,0 @@
|
||||
# T05- Configuring ingress for **_🎸ROCKY_** app
|
||||
|
||||
🍾 **_🎸ROCKY_** team has just deployed its `v1.0.0`
|
||||
|
||||
We would like to reach it from our workstations.
|
||||
The regular way to do it in Kubernetes is to configure an `Ingress` resource.
|
||||
|
||||
- `Ingress` is an abstract resource that manages how services are exposed outside of the Kubernetes cluster (Layer 7).
|
||||
- It relies on `ingress-controller`(s): concrete implementations that handle all the rules related to ingress traffic.
|
||||
|
||||
- Available features vary, depending on the `ingress-controller`: load-balancing, networking, firewalling, API management, throttling, TLS encryption, etc.
|
||||
- an `ingress-controller` may provision Cloud _IaaS_ network resources such as load balancers, persistent IPs, etc.
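
The `Ingress` we'll end up with for the **_🎸ROCKY_** app could look roughly like this (service name, port and host are assumptions; the actual manifest is `M6-rocky-ingress.yaml`, used later):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rocky
  namespace: rocky-test
spec:
  ingressClassName: nginx
  rules:
  - host: rocky.test.mybestdomain.com    # hypothetical host
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: rocky                  # hypothetical service name
            port:
              number: 80
```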
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Ingress -- for more info
|
||||
|
||||
Please, refer to the [`Ingress` chapter in the High Five M2 module](./2.yml.html#toc-exposing-http-services-with-ingress-resources)
|
||||
|
||||
---
|
||||
|
||||
## Installing `ingress-nginx` as our `ingress-controller`
|
||||
|
||||
We'll use `ingress-nginx` (based on `NGINX`), quite a popular choice.
|
||||
|
||||
- It is able to provision an IaaS load-balancer in Scaleway Cloud services
|
||||
- As a reverse-proxy, it is able to balance HTTP connections on an on-premises cluster
|
||||
|
||||
The **_⚙️OPS_** team adds this new install to its `Flux` config repo
|
||||
|
||||
---
|
||||
|
||||
### Creating a `Github` source in Flux for `ingress-nginx`
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
mkdir -p ./clusters/CLOUDY/ingress-nginx && \
|
||||
flux create source git ingress-nginx \
|
||||
--namespace=ingress-nginx \
|
||||
--url=https://github.com/kubernetes/ingress-nginx/ \
|
||||
--branch=release-1.12 \
|
||||
--export > ./clusters/CLOUDY/ingress-nginx/sync.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Creating `kustomization` in Flux for `ingress-nginx`
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux create kustomization ingress-nginx \
|
||||
--namespace=ingress-nginx \
|
||||
--source=GitRepository/ingress-nginx \
|
||||
--path="./deploy/static/provider/scw/" \
|
||||
--export >> ./clusters/CLOUDY/ingress-nginx/sync.yaml
|
||||
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
cp -p ~/container.training/k8s/M6-ingress-nginx-kustomization.yaml \
|
||||
./clusters/CLOUDY/ingress-nginx/kustomization.yaml && \
|
||||
cp -p ~/container.training/k8s/M6-ingress-nginx-components.yaml \
|
||||
~/container.training/k8s/M6-ingress-nginx-*-patch.yaml \
|
||||
./clusters/CLOUDY/ingress-nginx/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Applying the new config
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
git add ./clusters/CLOUDY/ingress-nginx && \
|
||||
git commit -m':wrench: :rocket: add Ingress-controller' && \
|
||||
git push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Using external Git source
|
||||
|
||||
💡 Note that you can directly use a public `Github` repository (not maintained by your company).
|
||||
|
||||
- If you have to alter the configuration, `Kustomize` patching capabilities might help.
|
||||
|
||||
- Depending on the _gitflow_ this repository uses, updates will be deployed automatically to your cluster (here we're using a `release` branch).
|
||||
|
||||
- This repo exposes a `kustomization.yaml`. Well done!
|
||||
|
||||
---
|
||||
|
||||
## Adding the `ingress` resource to ROCKY app
|
||||
|
||||
.lab[
|
||||
|
||||
- Add the new manifest to our kustomization set
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
cp -pr ~/container.training/k8s/M6-rocky-ingress.yaml ./tenants/base/rocky && \
|
||||
echo '- M6-rocky-ingress.yaml' >> ./tenants/base/rocky/kustomization.yaml
|
||||
```
|
||||
|
||||
- Commit and it's done
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
git add . && \
|
||||
git commit -m':wrench: :rocket: add Ingress' && \
|
||||
git push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Here is the result
|
||||
|
||||
After Flux has reconciled all the sources and kustomizations, you should see:
|
||||
|
||||
- `Ingress-NGinX` controller components in `ingress-nginx` namespace
|
||||
- A new `Ingress` in `rocky-test` namespace
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ kubectl get all -n ingress-nginx && \
|
||||
kubectl get ingress -n rocky-test
|
||||
|
||||
k8s@shpod:~$ \
|
||||
PublicIP=$(kubectl get ingress rocky -n rocky-test \
|
||||
-o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
|
||||
k8s@shpod:~$ \
|
||||
curl --header 'Host: rocky.test.mybestdomain.com' http://$PublicIP/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Upgrading **_🎸ROCKY_** app
|
||||
|
||||
**_🎸ROCKY_** team is now fully able to upgrade and deploy its app autonomously.
|
||||
|
||||
Just give it a try!
|
||||
- In the `deployment.yaml` file
|
||||
- in the app repo ([Musk8teers/container.training-spring-music](https://github.com/Musk8teers/container.training-spring-music/))
|
||||
- you can change the `spec.template.spec.containers.image` to `1.0.1` and then to `1.0.2`
|
||||
|
||||
Don't forget which branch is watched by the `Flux` Git source named `rocky`
|
||||
|
||||
Don't forget to commit!
|
||||
|
||||
---
|
||||
|
||||
## A few considerations
|
||||
|
||||
- The **_⚙️OPS_** team has to decide how to manage name resolution for public IPs
|
||||
- Scaleway proposes to expose a wildcard domain for its Kubernetes clusters
|
||||
|
||||
- Here, we chose to have the `Ingress-controller` (which makes sense) as well as the `Ingress` resources managed by the **_⚙️OPS_** team.
|
||||
- It could have been done in many different ways!
|
||||
|
||||
---
|
||||
|
||||
### 🗺️ Where are we in our scenario?
|
||||
|
||||
<pre class="mermaid">
|
||||
%%{init:
|
||||
{
|
||||
"theme": "default",
|
||||
"gitGraph": {
|
||||
"mainBranchName": "OPS",
|
||||
"mainBranchOrder": 0
|
||||
}
|
||||
}
|
||||
}%%
|
||||
gitGraph
|
||||
commit id:"0" tag:"start"
|
||||
branch ROCKY order:3
|
||||
branch MOVY order:4
|
||||
branch YouRHere order:5
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux install on CLOUDY cluster' tag:'T01'
|
||||
branch TEST-env order:1
|
||||
commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT
|
||||
|
||||
checkout OPS
|
||||
commit id:'Flux config. for TEST tenant' tag:'T03'
|
||||
commit id:'namespace isolation by RBAC'
|
||||
checkout TEST-env
|
||||
merge OPS id:'ROCKY tenant creation' tag:'T04'
|
||||
|
||||
checkout OPS
|
||||
commit id:'ROCKY deploy. config.' tag:'R01'
|
||||
|
||||
checkout TEST-env
|
||||
merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'ROCKY' tag:'v1.0.0'
|
||||
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.0'
|
||||
|
||||
checkout OPS
|
||||
commit id:'Ingress-controller config.' tag:'T05'
|
||||
checkout TEST-env
|
||||
merge OPS id:'Ingress-controller install' type: HIGHLIGHT tag:'T06'
|
||||
|
||||
checkout OPS
|
||||
commit id:'ROCKY patch for ingress config.' tag:'R03'
|
||||
checkout TEST-env
|
||||
merge OPS id:'ingress config. for ROCKY app'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'blue color' tag:'v1.0.1'
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.1'
|
||||
|
||||
checkout ROCKY
|
||||
commit id:'pink color' tag:'v1.0.2'
|
||||
checkout TEST-env
|
||||
merge ROCKY tag:'ROCKY v1.0.2'
|
||||
|
||||
checkout YouRHere
|
||||
commit id:'x'
|
||||
checkout OPS
|
||||
merge YouRHere id:'YOU ARE HERE'
|
||||
|
||||
checkout OPS
|
||||
commit id:'FLUX config for MOVY deployment' tag:'M01'
|
||||
checkout TEST-env
|
||||
merge OPS id:'FLUX ready to deploy MOVY' type: HIGHLIGHT tag:'M02'
|
||||
|
||||
checkout MOVY
|
||||
commit id:'MOVY' tag:'v1.0.3'
|
||||
checkout TEST-env
|
||||
merge MOVY tag:'MOVY v1.0.3' type: REVERSE
|
||||
|
||||
checkout OPS
|
||||
commit id:'Network policies'
|
||||
checkout TEST-env
|
||||
merge OPS type: HIGHLIGHT
|
||||
</pre>
|
||||
@@ -1,353 +0,0 @@
|
||||
# Installing a Kubernetes cluster from scratch
|
||||
|
||||
We operated a managed cluster from **Scaleway** `Kapsule`.
|
||||
|
||||
It's great! Most batteries are included:
|
||||
|
||||
- storage classes, with an already configured default one
|
||||
- a default CNI with `Cilium`
|
||||
<br/>(`Calico` is supported too)
|
||||
- an _IaaS_ load-balancer that is manageable by `ingress-controllers`
|
||||
- a management _WebUI_ with the Kubernetes dashboard
|
||||
- an observability stack with `metrics-server` and the Kubernetes dashboard
|
||||
|
||||
But what about _on premises_ needs?
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## On premises Kubernetes distributions
|
||||
|
||||
The [CNCF landscape](https://landscape.cncf.io/?fullscreen=yes&zoom=200&group=certified-partners-and-providers) currently lists **61** (!) Kubernetes distributions.
|
||||
Not to mention the managed Kubernetes services from Cloud providers…
|
||||
|
||||
Please, refer to the [`Setting up Kubernetes` chapter in the High Five M2 module](./2.yml.html#toc-setting-up-kubernetes) for more info about Kubernetes distributions.
|
||||
|
||||
---
|
||||
|
||||
## Introducing k0s
|
||||
|
||||
Nowadays, some "light" distros are considered good enough to run production clusters.
|
||||
That's the case for `k0s`.
|
||||
|
||||
It's an open source Kubernetes lightweight distribution.
|
||||
It mainly relies on **Mirantis**, a long-time software vendor in the Kubernetes ecosystem.
|
||||
(The ones who bought `Docker Enterprise` a while ago, remember?)
|
||||
|
||||
`k0s` aims to be both
|
||||
|
||||
- a lightweight distribution for _edge-computing_ and development purposes
|
||||
- an enterprise-grade HA distribution fully supported by its editor
|
||||
<br/>`MKE4` and `kordent` leverage on `k0s`
|
||||
|
||||
---
|
||||
|
||||
### `k0s` package
|
||||
|
||||
Its single binary includes:
|
||||
|
||||
- a CRI (`containerd`)
|
||||
- Kubernetes vanilla control plane components (including `etcd`)
|
||||
- a vanilla network stack
|
||||
- `kube-router`
|
||||
- `kube-proxy`
|
||||
- `coredns`
|
||||
- `konnectivity`
|
||||
- `kubectl` CLI
|
||||
- install / uninstall features
|
||||
- backup / restore features
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Konnectivity
|
||||
|
||||
You've seen that Kubernetes cluster architecture is very versatile.
|
||||
I'm referring to the [`Kubernetes architecture` chapter in the High Five M5 module](./5.yml.html#toc-kubernetes-architecture)
|
||||
|
||||
Network communications between control plane components and worker nodes can be tricky to configure.
|
||||
`Konnectivity` is a response to this pain. It acts as an RPC proxy for any communication initiated from the control plane to the workers.
|
||||
|
||||
These communications are listed in [`Kubernetes internal APIs` chapter in the High Five M5 module](https://2025-01-enix.container.training/5.yml.html#toc-kubernetes-internal-apis)
|
||||
|
||||
The agent deployed on each worker node maintains an RPC tunnel with the server deployed on the control plane side.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Installing `k0s`
|
||||
|
||||
It installs with a one-liner command
|
||||
|
||||
- either in a single-node lightweight footprint
|
||||
- or in a multi-node HA footprint
|
||||
|
||||
.lab[
|
||||
|
||||
- Get the binary
|
||||
|
||||
```bash
|
||||
docker@m621: ~$ wget https://github.com/k0sproject/k0sctl/releases/download/v0.25.1/k0sctl-linux-amd64
# make it executable and put it on the PATH, so the next steps can simply call "k0sctl"
docker@m621: ~$ sudo install k0sctl-linux-amd64 /usr/local/bin/k0sctl
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Prepare the config file
|
||||
|
||||
.lab[
|
||||
|
||||
- Create the config file
|
||||
|
||||
```bash
|
||||
docker@m621: ~$ k0sctl init \
|
||||
--controller-count 3 \
|
||||
--user docker \
|
||||
--k0s m621 m622 m623 > k0sctl.yaml
|
||||
```
|
||||
|
||||
- change the following field: `spec.hosts[*].role: controller+worker`
|
||||
- add the following field: `spec.hosts[*].noTaints: true`
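
After those edits, the relevant part of `k0sctl.yaml` should look roughly like this (addresses and user taken from the `init` flags above; the rest of the file is omitted):

```yaml
spec:
  hosts:
  - role: controller+worker
    noTaints: true
    ssh:
      address: m621
      user: docker
  # …same role and noTaints for m622 and m623…
```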
|
||||
|
||||
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### And the famous one-liner
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
docker@m621: ~$ k0sctl apply --config k0sctl.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### Check that k0s installed correctly
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
docker@m621 ~$ sudo k0s status
|
||||
Version: v1.33.1+k0s.1
|
||||
Process ID: 60183
|
||||
Role: controller
|
||||
Workloads: true
|
||||
SingleNode: false
|
||||
Kube-api probing successful: true
|
||||
Kube-api probing last error:
|
||||
|
||||
docker@m621 ~$ sudo k0s etcd member-list
|
||||
{"members":{"m621":"https://10.10.3.190:2380","m622":"https://10.10.2.92:2380","m623":"https://10.10.2.110:2380"}}
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
### `kubectl` is included
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
docker@m621 ~$ sudo k0s kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
m621 Ready control-plane 66m v1.33.1+k0s
|
||||
m622 Ready control-plane 66m v1.33.1+k0s
|
||||
m623 Ready control-plane 66m v1.33.1+k0s
|
||||
|
||||
docker@m621 ~$ sudo k0s kubectl run shpod --image jpetazzo/shpod
|
||||
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Single node install (for info!)
|
||||
|
||||
For testing purpose, you may want to use a single-node (yet `etcd`-geared) install…
|
||||
|
||||
.lab[
|
||||
|
||||
- Install it
|
||||
|
||||
```bash
|
||||
docker@m621 ~$ curl -sSLf https://get.k0s.sh | sudo sh
|
||||
docker@m621 ~$ sudo k0s install controller --single
|
||||
docker@m621 ~$ sudo k0s start
|
||||
```
|
||||
|
||||
- Reset it
|
||||
|
||||
```bash
|
||||
docker@m621 ~$ sudo k0s stop
|
||||
docker@m621 ~$ sudo k0s reset
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying shpod
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
docker@m621 ~$ sudo k0s kubectl apply -f https://shpod.in/shpod.yaml
|
||||
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Flux install
|
||||
|
||||
We'll install `Flux`.
|
||||
And replay the whole scenario a 2nd time.
|
||||
Let's face it: we don't have that much time. 😅
|
||||
|
||||
Since all our install and configuration is `GitOps`-based, we might just leverage copy-paste and configuration as code…
|
||||
Maybe.
|
||||
|
||||
Let's copy the 📂 `./clusters/CLOUDY` folder and rename it 📂 `./clusters/METAL`.
|
||||
|
||||
---
|
||||
|
||||
### Modifying Flux config 📄 files
|
||||
|
||||
- In 📄 file `./clusters/METAL/flux-system/gotk-sync.yaml`
|
||||
</br>change the `Kustomization` value `spec.path: ./clusters/METAL`
|
||||
- ⚠️ We'll have to adapt the `Flux` _CLI_ command line
|
||||
|
||||
- And that's pretty much it!
|
||||
- We'll see if anything goes wrong on that new cluster
|
||||
|
||||
---
|
||||
|
||||
### Connecting to our dedicated `Github` repo to host Flux config
|
||||
|
||||
.lab[
|
||||
|
||||
- let's replace `GITHUB_TOKEN` and `GITHUB_REPO` values
|
||||
- don't forget to change the path to `clusters/METAL`
|
||||
|
||||
```bash
|
||||
k8s@shpod:~$ export GITHUB_TOKEN="my-token" && \
|
||||
export GITHUB_USER="container-training-fleet" && \
|
||||
export GITHUB_REPO="fleet-config-using-flux-XXXXX"
|
||||
|
||||
k8s@shpod:~$ flux bootstrap github \
|
||||
--owner=${GITHUB_USER} \
|
||||
--repository=${GITHUB_REPO} \
|
||||
--team=OPS \
|
||||
--team=ROCKY --team=MOVY \
|
||||
--path=clusters/METAL
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Flux deployed our complete stack
|
||||
|
||||
Everything seems to be here but…
|
||||
|
||||
- one database is in `Pending` state
|
||||
|
||||
- our `ingresses` don't work well
|
||||
|
||||
```bash
|
||||
k8s@shpod ~$ curl --header 'Host: rocky.test.enixdomain.com' http://${myIngressControllerSvcIP}
|
||||
curl: (52) Empty reply from server
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Fixing the Ingress
|
||||
|
||||
The current `ingress-nginx` configuration leverages specific annotations used by Scaleway to bind an _IaaS_ load-balancer to the `ingress-controller`.
|
||||
We don't have that kind of thing here. 😕
|
||||
|
||||
- We could bind our `ingress-controller` to a `NodePort`.
|
||||
`ingress-nginx` install manifests provide it here:
|
||||
</br>https://github.com/kubernetes/ingress-nginx/deploy/static/provider/baremetal
|
||||
|
||||
- In the 📄file `./clusters/METAL/ingress-nginx/sync.yaml`,
|
||||
</br>change the `Kustomization` value `spec.path: ./deploy/static/provider/baremetal`
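
The resulting `Kustomization` in that file would then look roughly like this (only the relevant fields are shown; metadata inferred from the CLOUDY setup):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  path: ./deploy/static/provider/baremetal
  sourceRef:
    kind: GitRepository
    name: ingress-nginx
```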
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Troubleshooting the database
|
||||
|
||||
One of our `db-0` pods is in the `Pending` state.
|
||||
|
||||
```bash
|
||||
k8s@shpod ~$ k get pods db-0 -n *-test -oyaml
|
||||
(…)
|
||||
status:
|
||||
conditions:
|
||||
- lastProbeTime: null
|
||||
lastTransitionTime: "2025-06-11T11:15:42Z"
|
||||
message: '0/3 nodes are available: pod has unbound immediate PersistentVolumeClaims.
|
||||
preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling.'
|
||||
reason: Unschedulable
|
||||
status: "False"
|
||||
type: PodScheduled
|
||||
phase: Pending
|
||||
qosClass: Burstable
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Troubleshooting the PersistentVolumeClaims
|
||||
|
||||
```bash
|
||||
k8s@shpod ~$ k get pvc postgresql-data-db-0 -n *-test -o yaml
|
||||
(…)
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal FailedBinding 9s (x182 over 45m) persistentvolume-controller no persistent volumes available for this claim and no storage class is set
|
||||
```
|
||||
|
||||
No `storage class` is available on this cluster.
|
||||
We didn't have this problem on our managed cluster, since a default storage class was configured and automatically associated with our `PersistentVolumeClaim`.
|
||||
|
||||
Why is there no problem with the other database?
|
||||
|
||||
---
|
||||
|
||||
## Installing OpenEBS as our CSI
|
||||
@@ -1,241 +0,0 @@
|
||||
## Introducing Kyverno
|
||||
|
||||
Kyverno is a tool that extends Kubernetes permission management to express complex policies…
|
||||
</br>… and override manifests delivered by client teams.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Kyverno -- for more info
|
||||
|
||||
Please, refer to the [`Policy management with Kyverno` chapter in the High Five M4 module](./4.yml.html#toc-policy-management-with-kyverno) for more info about `Kyverno`.
|
||||
|
||||
---
|
||||
|
||||
## Creating a `Helm` source in Flux for the Kyverno Helm chart
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
mkdir -p clusters/CLOUDY/kyverno
|
||||
|
||||
k8s@shpod ~$ flux create source helm kyverno \
|
||||
--namespace=kyverno \
|
||||
--url=https://kyverno.github.io/kyverno/ \
|
||||
--interval=3m \
|
||||
--export > ./clusters/CLOUDY/kyverno/sync.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating the `HelmRelease` in Flux
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod ~$ flux create helmrelease kyverno \
|
||||
--namespace=kyverno \
|
||||
--source=HelmRepository/kyverno.flux-system \
|
||||
--target-namespace=kyverno \
|
||||
--create-target-namespace=true \
|
||||
--chart-version=">=3.4.2" \
|
||||
--chart=kyverno \
|
||||
--export >> ./clusters/CLOUDY/kyverno/sync.yaml
|
||||
```
|
||||
|
||||
]
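
The exported `HelmRelease` should look roughly like this (fields inferred from the flags above; the exact `apiVersion` may vary with your Flux version):

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: kyverno
  namespace: kyverno
spec:
  chart:
    spec:
      chart: kyverno
      version: '>=3.4.2'
      sourceRef:
        kind: HelmRepository
        name: kyverno
        namespace: flux-system
  targetNamespace: kyverno
  install:
    createNamespace: true
```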
|
||||
|
||||
---
|
||||
|
||||
## Add Kyverno policy
|
||||
|
||||
This policy is just an example.
|
||||
It enforces the use of a `Service Account` in `Flux` configurations
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
mkdir -p clusters/CLOUDY/kyverno-policies && \
|
||||
cp -pr ~/container.training/k8s/M6-kyverno-enforce-service-account.yaml \
|
||||
./clusters/CLOUDY/kyverno-policies/
```
|
||||
|
||||
---
|
||||
|
||||
### Creating `kustomization` in Flux for Kyverno policies
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
k8s@shpod:~/fleet-config-using-flux-XXXXX$ \
|
||||
flux create kustomization kyverno-policies \
|
||||
--namespace=kyverno \
|
||||
--source=GitRepository/flux-system \
|
||||
--path="./clusters/CLOUDY/kyverno-policies/" \
|
||||
--prune true --interval 5m \
|
||||
--depends-on kyverno \
|
||||
--export >> ./clusters/CLOUDY/kyverno-policies/sync.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Add Kyverno dependency for **_⚗️TEST_** cluster
|
||||
|
||||
- Now that we've got `Kyverno` policies,
|
||||
- the ops team wants to enforce that any upgrade of any kustomization in our dev team tenants
|
||||
- waits for the `kyverno` policies to be reconciled (from a `Flux` perspective)
|
||||
|
||||
- update the file `./clusters/CLOUDY/tenants.yaml`,
|
||||
- by adding this property: `spec.dependsOn.{name: kyverno-policies}`
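
In `Flux` terms, each tenant `Kustomization` gains a `dependsOn` entry, roughly like this (only the added field is shown):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: tenant-test
  namespace: flux-system
spec:
  dependsOn:
  - name: kyverno-policies
  # …rest of the spec unchanged…
```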
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
### Debugging
|
||||
|
||||
The `kyverno-policies` `Kustomization` failed because the `spec.dependsOn` property can only target a resource of the same `Kind`.
|
||||
|
||||
- Let's remove the `spec.dependsOn` property.
|
||||
|
||||
Now the `Kustomizations` for the **_🎸ROCKY_** and **_🎬MOVY_** tenants fail because of our policies.
|
||||
|
||||
---
|
||||
|
||||
### 🗺️ Where are we in our scenario?
|
||||
|
||||
<pre class="mermaid">
%%{init:
{
"theme": "default",
"gitGraph": {
"mainBranchName": "OPS",
"mainBranchOrder": 0
}
}
}%%
gitGraph
commit id:"0" tag:"start"
branch ROCKY order:4
branch MOVY order:5
branch YouRHere order:6

checkout OPS
commit id:'Flux install on CLOUDY cluster' tag:'T01'
branch TEST-env order:1
commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT

checkout OPS
commit id:'Flux config. for TEST tenant' tag:'T03'
commit id:'namespace isolation by RBAC'
checkout TEST-env
merge OPS id:'ROCKY tenant creation' tag:'T04'

checkout OPS
commit id:'ROCKY deploy. config.' tag:'R01'

checkout TEST-env
merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'

checkout ROCKY
commit id:'ROCKY' tag:'v1.0.0'

checkout TEST-env
merge ROCKY tag:'ROCKY v1.0.0'

checkout OPS
commit id:'Ingress-controller config.' tag:'T05'
checkout TEST-env
merge OPS id:'Ingress-controller install' type: HIGHLIGHT tag:'T06'

checkout OPS
commit id:'ROCKY patch for ingress config.' tag:'R03'
checkout TEST-env
merge OPS id:'ingress config. for ROCKY app'

checkout ROCKY
commit id:'blue color' tag:'v1.0.1'
checkout TEST-env
merge ROCKY tag:'ROCKY v1.0.1'

checkout ROCKY
commit id:'pink color' tag:'v1.0.2'
checkout TEST-env
merge ROCKY tag:'ROCKY v1.0.2'

checkout OPS
commit id:'FLUX config for MOVY deployment' tag:'M01'
checkout TEST-env
merge OPS id:'FLUX ready to deploy MOVY' type: HIGHLIGHT tag:'M02'

checkout MOVY
commit id:'MOVY' tag:'v1.0.3'
checkout TEST-env
merge MOVY tag:'MOVY v1.0.3' type: REVERSE

checkout OPS
commit id:'Network policies'
checkout TEST-env
merge OPS type: HIGHLIGHT tag:'T07'

checkout OPS
commit id:'k0s install on METAL cluster' tag:'K01'
commit id:'Flux config. for METAL cluster' tag:'K02'
branch METAL_TEST-PROD order:3
commit id:'ROCKY/MOVY tenants on METAL' type: HIGHLIGHT
checkout OPS
commit id:'Flux config. for OpenEBS' tag:'K03'
checkout METAL_TEST-PROD
merge OPS id:'openEBS on METAL' type: HIGHLIGHT

checkout OPS
commit id:'Prometheus install'
checkout TEST-env
merge OPS type: HIGHLIGHT

checkout OPS
commit id:'Kyverno install'
commit id:'Kyverno rules'
checkout TEST-env
merge OPS type: HIGHLIGHT

checkout YouRHere
commit id:'x'
checkout OPS
merge YouRHere id:'YOU ARE HERE'

checkout OPS
commit id:'Flux config. for PROD tenant' tag:'P01'
branch PROD-env order:2
commit id:'ROCKY tenant on PROD'
checkout OPS
commit id:'ROCKY patch for PROD' tag:'R04'
checkout PROD-env
merge OPS id:'PROD ready to deploy ROCKY' type: HIGHLIGHT
checkout PROD-env
merge ROCKY tag:'ROCKY v1.0.2'

checkout MOVY
commit id:'MOVY HELM chart' tag:'M03'
checkout TEST-env
merge MOVY tag:'MOVY v1.0'
</pre>

@@ -1,251 +0,0 @@
# Install monitoring stack

The **_⚙️OPS_** team wants a real monitoring stack for its clusters.
Let's deploy `Prometheus` and `Grafana` onto the clusters.

Note:

---

## Creating `Github` source in Flux for the monitoring components repository

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ mkdir -p clusters/CLOUDY/kube-prometheus-stack

k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux create source git monitoring \
     --namespace=monitoring \
     --url=https://github.com/fluxcd/flux2-monitoring-example.git \
     --branch=main --export > ./clusters/CLOUDY/kube-prometheus-stack/sync.yaml
```

]

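Once committed and pushed, the new source should show up (a quick check; output will vary):

```bash
flux get sources git --namespace monitoring
```
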
---

### Creating `kustomization` in Flux for the monitoring stack

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux create kustomization monitoring \
     --namespace=monitoring \
     --source=GitRepository/monitoring \
     --path="./monitoring/controllers/kube-prometheus-stack/" \
     --export >> ./clusters/CLOUDY/kube-prometheus-stack/sync.yaml
```

]

---

### Install Flux Grafana dashboards

.lab[

```bash
k8s@shpod:~/fleet-config-using-flux-XXXXX$ flux create kustomization dashboards \
     --namespace=monitoring \
     --source=GitRepository/monitoring \
     --path="./monitoring/configs/" \
     --export >> ./clusters/CLOUDY/kube-prometheus-stack/sync.yaml
```

]

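All of this only takes effect once `sync.yaml` is committed and pushed; then we can watch the objects reconcile (a sketch; the commit message is just an example):

```bash
git add clusters/CLOUDY/kube-prometheus-stack/
git commit -m "Add kube-prometheus-stack monitoring"
git push
flux get kustomizations --namespace monitoring --watch
```
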
---

class: pic



---

## Flux repository synchronization is broken 😅

It seems that `Flux` on the **_☁️CLOUDY_** cluster is no longer able to authenticate over `ssh` to its `Github` config repository!

What happened?
When we installed `Flux` on the **_🤘METAL_** cluster, it generated a new `ssh` keypair and overwrote the one used by **_☁️CLOUDY_** among the "deploy keys" of the `Github` repository.

⚠️ Beware of the `flux bootstrap` command!

We have to:
- generate a new keypair (or reuse an existing one), as sketched below
- add the private key to the Flux-dedicated secret in the **_☁️CLOUDY_** cluster
- add the public key to the "deploy keys" of the `Github` repository

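A possible way to generate the new keypair (the comment string is arbitrary; adjust the path if needed):

```bash
ssh-keygen -t ed25519 -f /home/k8s/.ssh/id_ed25519 -N "" -C "flux-cloudy"
```
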
---

### The command

.lab[

- The `Flux` _CLI_ helps us recreate the secret holding the `ssh` **private** key:

```bash
k8s@shpod:~$ flux create secret git flux-system \
     --url=ssh://git@github.com/container-training-fleet/fleet-config-using-flux-XXXXX \
     --private-key-file=/home/k8s/.ssh/id_ed25519
```

- Copy the **public** key into the deploy keys of the `Github` repository.

]

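To confirm that authentication works again, we can trigger a reconciliation by hand (a quick check):

```bash
flux reconcile source git flux-system --namespace flux-system
```
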
---

class: pic



---

## Access the Grafana dashboard

.lab[

- Get the `Host` and `IP` address to request:

```bash
k8s@shpod:~$ kubectl -n monitoring get ingress
NAME      CLASS   HOSTS                                 ADDRESS        PORTS   AGE
grafana   nginx   grafana.test.metal.mybestdomain.com   62.210.39.83   80      6m30s
```

- Get the `Grafana` admin password:

```bash
k8s@shpod:~$ k get secret kube-prometheus-stack-grafana -n monitoring \
     -o jsonpath='{.data.admin-password}' | base64 -d
```

]

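If the ingress is not reachable from our machine, a port-forward is an alternative (the service name below is the usual kube-prometheus-stack default, assumed here):

```bash
kubectl -n monitoring port-forward svc/kube-prometheus-stack-grafana 3000:80
```
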
---

class: pic

## And browse…



---

### 🗺️ Where are we in our scenario?

<pre class="mermaid">
%%{init:
{
"theme": "default",
"gitGraph": {
"mainBranchName": "OPS",
"mainBranchOrder": 0
}
}
}%%
gitGraph
commit id:"0" tag:"start"
branch ROCKY order:4
branch MOVY order:5
branch YouRHere order:6

checkout OPS
commit id:'Flux install on CLOUDY cluster' tag:'T01'
branch TEST-env order:1
commit id:'FLUX install on TEST' tag:'T02' type: HIGHLIGHT

checkout OPS
commit id:'Flux config. for TEST tenant' tag:'T03'
commit id:'namespace isolation by RBAC'
checkout TEST-env
merge OPS id:'ROCKY tenant creation' tag:'T04'

checkout OPS
commit id:'ROCKY deploy. config.' tag:'R01'

checkout TEST-env
merge OPS id:'TEST ready to deploy ROCKY' type: HIGHLIGHT tag:'R02'

checkout ROCKY
commit id:'ROCKY' tag:'v1.0.0'

checkout TEST-env
merge ROCKY tag:'ROCKY v1.0.0'

checkout OPS
commit id:'Ingress-controller config.' tag:'T05'
checkout TEST-env
merge OPS id:'Ingress-controller install' type: HIGHLIGHT tag:'T06'

checkout OPS
commit id:'ROCKY patch for ingress config.' tag:'R03'
checkout TEST-env
merge OPS id:'ingress config. for ROCKY app'

checkout ROCKY
commit id:'blue color' tag:'v1.0.1'
checkout TEST-env
merge ROCKY tag:'ROCKY v1.0.1'

checkout ROCKY
commit id:'pink color' tag:'v1.0.2'
checkout TEST-env
merge ROCKY tag:'ROCKY v1.0.2'

checkout OPS
commit id:'FLUX config for MOVY deployment' tag:'M01'
checkout TEST-env
merge OPS id:'FLUX ready to deploy MOVY' type: HIGHLIGHT tag:'M02'

checkout MOVY
commit id:'MOVY' tag:'v1.0.3'
checkout TEST-env
merge MOVY tag:'MOVY v1.0.3' type: REVERSE

checkout OPS
commit id:'Network policies'
checkout TEST-env
merge OPS type: HIGHLIGHT tag:'T07'

checkout OPS
commit id:'k0s install on METAL cluster' tag:'K01'
commit id:'Flux config. for METAL cluster' tag:'K02'
branch METAL_TEST-PROD order:3
commit id:'ROCKY/MOVY tenants on METAL' type: HIGHLIGHT
checkout OPS
commit id:'Flux config. for OpenEBS' tag:'K03'
checkout METAL_TEST-PROD
merge OPS id:'openEBS on METAL' type: HIGHLIGHT

checkout OPS
commit id:'Prometheus install'
checkout TEST-env
merge OPS type: HIGHLIGHT

checkout YouRHere
commit id:'x'
checkout OPS
merge YouRHere id:'YOU ARE HERE'

checkout OPS
commit id:'Kyverno install'
commit id:'Kyverno rules'
checkout TEST-env
merge OPS type: HIGHLIGHT

checkout OPS
commit id:'Flux config. for PROD tenant' tag:'P01'
branch PROD-env order:2
commit id:'ROCKY tenant on PROD'
checkout OPS
commit id:'ROCKY patch for PROD' tag:'R04'
checkout PROD-env
merge OPS id:'PROD ready to deploy ROCKY' type: HIGHLIGHT
checkout PROD-env
merge ROCKY tag:'ROCKY v1.0.2'

checkout MOVY
commit id:'MOVY HELM chart' tag:'M03'
checkout TEST-env
merge MOVY tag:'MOVY v1.0'
</pre>

@@ -32,7 +32,7 @@

- Problem mitigation

  *block nodes with vulnerable kernels, inject log4j mitigations, rewrite images...*
  *block nodes with vulnerable kernels, inject log4j mitigations...*

- Extended validation for operators

@@ -583,38 +583,19 @@ Shell to the rescue!

---

## Real world examples

- [kube-image-keeper][kuik] rewrites image references to use cached images

  (e.g. `nginx` → `localhost:7439/nginx`)

- [Kyverno] implements very extensive policies

  (validation, generation... it deserves a whole chapter of its own!)

[kuik]: https://github.com/enix/kube-image-keeper
[kyverno]: https://kyverno.io/

---

## Alternatives
## Coming soon...

- Kubernetes Validating Admission Policies

- Relatively recent (alpha: 1.26, beta: 1.28, GA: 1.30)
- Integrated with the Kubernetes API server

- Declare validation rules with Common Expression Language ([CEL][cel-spec])
- Lets us define policies using [CEL (Common Expression Language)][cel-spec]

- Validation is done entirely within the API server
- Available in beta in Kubernetes 1.28 <!-- ##VERSION## -->

  (no external webhook = no latency, no deployment complexity...)
- Check this [CNCF Blog Post][cncf-blog-vap] for more details

- Not as powerful as full-fledged webhook engines like Kyverno

  (see e.g. [this page of the Kyverno doc][kyverno-vap] for a comparison)

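To give an idea of what these CEL policies look like, here is a hypothetical `ValidatingAdmissionPolicy` (the name and the rule are made up for illustration; a `ValidatingAdmissionPolicyBinding` is also needed to put it into effect):

```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
  name: demo-replica-limit
spec:
  failurePolicy: Fail
  matchConstraints:
    resourceRules:
      - apiGroups:   ["apps"]
        apiVersions: ["v1"]
        operations:  ["CREATE", "UPDATE"]
        resources:   ["deployments"]
  validations:
    - expression: "object.spec.replicas <= 5"
      message: "Deployments cannot have more than 5 replicas."
```
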
[kyverno-vap]: https://kyverno.io/docs/policy-types/validating-policy/
[cncf-blog-vap]: https://www.cncf.io/blog/2023/09/14/policy-management-in-kubernetes-is-changing/
[cel-spec]: https://github.com/google/cel-spec

???

@@ -35,7 +35,7 @@

## The chain of handlers

- API requests go through a complex chain of filters ([src](https://github.com/kubernetes/apiserver/blob/release-1.32/pkg/server/config.go#L1004))
- API requests go through a complex chain of filters ([src](https://github.com/kubernetes/apiserver/blob/release-1.19/pkg/server/config.go#L671))

  (note when reading that code: requests start at the bottom and go up)