Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-28 16:30:21 +00:00)

Compare commits: 2024-10-fo...2023-12-de (1 commit, SHA1 77606044f6)
@@ -1,6 +1,6 @@
FROM ruby:alpine
RUN apk add --update build-base curl
RUN gem install sinatra --version '~> 3'
RUN gem install sinatra
RUN gem install thin
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
@@ -16,7 +16,8 @@ spec:
hostPath:
path: /root
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
initContainers:
- name: hacktheplanet
image: alpine
@@ -26,7 +27,7 @@ spec:
command:
- sh
- -c
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys >> /root/.ssh/authorized_keys"
- "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
containers:
- name: web
image: nginx
@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: sysctl
spec:
  selector:
    matchLabels:
      app: sysctl
  template:
    metadata:
      labels:
        app: sysctl
    spec:
      tolerations:
      - operator: Exists
      initContainers:
      - name: sysctl
        image: alpine
        securityContext:
          privileged: true
        command:
        - sysctl
        - fs.inotify.max_user_instances=99999
      containers:
      - name: pause
        image: registry.k8s.io/pause:3.8
@@ -25,7 +25,7 @@ cloudflare() {
}

_list_zones() {
cloudflare zones?per_page=100 | jq -r .result[].name
cloudflare zones | jq -r .result[].name
}

_get_zone_id() {
@@ -1,9 +1,7 @@
#!/bin/sh

set -eu

# https://open-api.netlify.com/#tag/dnsZone
[ "${1-}" ] || {
[ "$1" ] || {
echo ""
echo "Add a record in Netlify DNS."
echo "This script is hardcoded to add a record to container.training".
@@ -20,7 +18,7 @@ set -eu
}

NETLIFY_CONFIG_FILE=~/.config/netlify/config.json
if ! [ "${DOMAIN-}" ]; then
if ! [ "$DOMAIN" ]; then
DOMAIN=container.training
fi
@@ -1,29 +1,17 @@
#!/bin/sh
#
# Baseline resource usage per vcluster in our usecase:
# 500 MB RAM
# 10% CPU
# (See https://docs.google.com/document/d/1n0lwp6rQKQUIuo_A5LQ1dgCzrmjkDjmDtNj1Jn92UrI)
# PRO2-XS = 4 core, 16 gb

PROVIDER=scaleway
# deploy big cluster
#TF_VAR_node_size=g6-standard-6 \
#TF_VAR_nodes_per_cluster=5 \
#TF_VAR_location=eu-west \

case "$PROVIDER" in
linode)
export TF_VAR_node_size=g6-standard-6
export TF_VAR_location=eu-west
;;
scaleway)
export TF_VAR_node_size=PRO2-XS
export TF_VAR_location=fr-par-2
;;
esac

./labctl create --mode mk8s --settings settings/konk.env --provider $PROVIDER --tag konk
TF_VAR_node_size=PRO2-XS \
TF_VAR_nodes_per_cluster=5 \
TF_VAR_location=fr-par-2 \
./labctl create --mode mk8s --settings settings/mk8s.env --provider scaleway --tag konk

# set kubeconfig file
export KUBECONFIG=~/kubeconfig
cp tags/konk/stage2/kubeconfig.101 $KUBECONFIG
cp tags/konk/stage2/kubeconfig.101 ~/kubeconfig

# set external_ip labels
kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name} {.status.addresses[?(@.type=="ExternalIP")].address}{"\n"}{end}' |
@@ -33,11 +21,3 @@ done

# vcluster all the things
./labctl create --settings settings/mk8s.env --provider vcluster --mode mk8s --students 50

# install prometheus stack because that's cool
helm upgrade --install --repo https://prometheus-community.github.io/helm-charts \
--namespace prom-system --create-namespace \
kube-prometheus-stack kube-prometheus-stack

# and also fix sysctl
kubectl apply -f ../k8s/sysctl.yaml --namespace kube-system
@@ -321,7 +321,6 @@ _cmd_clusterize() {
pssh "
set -e
grep PSSH_ /etc/ssh/sshd_config || echo 'AcceptEnv PSSH_*' | sudo tee -a /etc/ssh/sshd_config
grep KUBECOLOR_ /etc/ssh/sshd_config || echo 'AcceptEnv KUBECOLOR_*' | sudo tee -a /etc/ssh/sshd_config
sudo systemctl restart ssh.service"

pssh -I < tags/$TAG/clusters.txt "
@@ -393,7 +392,7 @@ _cmd_docker() {
##VERSION## https://github.com/docker/compose/releases
COMPOSE_VERSION=v2.11.1
COMPOSE_PLATFORM='linux-$(uname -m)'

# Just in case you need Compose 1.X, you can use the following lines.
# (But it will probably only work for x86_64 machines.)
#COMPOSE_VERSION=1.29.2
@@ -494,7 +493,7 @@ EOF"
|
||||
|
||||
# Install packages
|
||||
pssh --timeout 200 "
|
||||
curl -fsSL https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/Release.key |
|
||||
curl -fsSL https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/Release.key |
|
||||
gpg --dearmor | sudo tee /etc/apt/keyrings/kubernetes-apt-keyring.gpg &&
|
||||
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$KUBEREPOVERSION/deb/ /' |
|
||||
sudo tee /etc/apt/sources.list.d/kubernetes.list"
|
||||
@@ -504,7 +503,7 @@ EOF"
|
||||
sudo apt-mark hold kubelet kubeadm kubectl &&
|
||||
kubeadm completion bash | sudo tee /etc/bash_completion.d/kubeadm &&
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
|
||||
echo 'alias k=kubecolor' | sudo tee /etc/bash_completion.d/k &&
|
||||
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
|
||||
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
|
||||
}
|
||||
|
||||
@@ -517,7 +516,6 @@ _cmd_kubeadm() {
|
||||
CLUSTER_CONFIGURATION_KUBERNETESVERSION='kubernetesVersion: "v'$KUBEVERSION'"'
|
||||
IGNORE_SYSTEMVERIFICATION="- SystemVerification"
|
||||
IGNORE_SWAP="- Swap"
|
||||
IGNORE_IPTABLES="- FileContent--proc-sys-net-bridge-bridge-nf-call-iptables"
|
||||
fi
|
||||
|
||||
# Install a valid configuration for containerd
|
||||
@@ -541,7 +539,6 @@ nodeRegistration:
|
||||
- NumCPU
|
||||
$IGNORE_SYSTEMVERIFICATION
|
||||
$IGNORE_SWAP
|
||||
$IGNORE_IPTABLES
|
||||
---
|
||||
kind: JoinConfiguration
|
||||
apiVersion: kubeadm.k8s.io/v1beta3
|
||||
@@ -555,7 +552,6 @@ nodeRegistration:
|
||||
- NumCPU
|
||||
$IGNORE_SYSTEMVERIFICATION
|
||||
$IGNORE_SWAP
|
||||
$IGNORE_IPTABLES
|
||||
---
|
||||
kind: KubeletConfiguration
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
@@ -638,31 +634,6 @@ _cmd_kubetools() {
|
||||
;;
|
||||
esac
|
||||
|
||||
# Install ArgoCD CLI
|
||||
##VERSION## https://github.com/argoproj/argo-cd/releases/latest
|
||||
URL=https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-${ARCH}
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/argocd ]; then
|
||||
sudo curl -o /usr/local/bin/argocd -fsSL $URL
|
||||
sudo chmod +x /usr/local/bin/argocd
|
||||
argocd completion bash | sudo tee /etc/bash_completion.d/argocd
|
||||
argocd version --client
|
||||
fi"
|
||||
|
||||
# Install Flux CLI
|
||||
##VERSION## https://github.com/fluxcd/flux2/releases
|
||||
FLUX_VERSION=2.3.0
|
||||
FILENAME=flux_${FLUX_VERSION}_linux_${ARCH}
|
||||
URL=https://github.com/fluxcd/flux2/releases/download/v$FLUX_VERSION/$FILENAME.tar.gz
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/flux ]; then
|
||||
curl -fsSL $URL |
|
||||
sudo tar -C /usr/local/bin -zx flux
|
||||
sudo chmod +x /usr/local/bin/flux
|
||||
flux completion bash | sudo tee /etc/bash_completion.d/flux
|
||||
flux --version
|
||||
fi"
|
||||
|
||||
# Install kubectx and kubens
|
||||
pssh "
|
||||
set -e
|
||||
@@ -694,7 +665,7 @@ EOF
|
||||
|
||||
# Install stern
|
||||
##VERSION## https://github.com/stern/stern/releases
|
||||
STERN_VERSION=1.29.0
|
||||
STERN_VERSION=1.22.0
|
||||
FILENAME=stern_${STERN_VERSION}_linux_${ARCH}
|
||||
URL=https://github.com/stern/stern/releases/download/v$STERN_VERSION/$FILENAME.tar.gz
|
||||
pssh "
|
||||
@@ -716,7 +687,7 @@ EOF
|
||||
|
||||
# Install kustomize
|
||||
##VERSION## https://github.com/kubernetes-sigs/kustomize/releases
|
||||
KUSTOMIZE_VERSION=v5.4.1
|
||||
KUSTOMIZE_VERSION=v4.5.7
|
||||
URL=https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_${ARCH}.tar.gz
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kustomize ]; then
|
||||
@@ -747,16 +718,6 @@ EOF
|
||||
aws-iam-authenticator version
|
||||
fi"
|
||||
|
||||
# Install jless (jless.io)
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/jless ]; then
|
||||
##VERSION##
|
||||
sudo apt-get install -y libxcb-render0 libxcb-shape0 libxcb-xfixes0
|
||||
wget https://github.com/PaulJuliusMartinez/jless/releases/download/v0.9.0/jless-v0.9.0-x86_64-unknown-linux-gnu.zip
|
||||
unzip jless-v0.9.0-x86_64-unknown-linux-gnu
|
||||
sudo mv jless /usr/local/bin
|
||||
fi"
|
||||
|
||||
# Install the krew package manager
|
||||
pssh "
|
||||
if [ ! -d /home/$USER_LOGIN/.krew ]; then
|
||||
@@ -768,31 +729,21 @@ EOF
|
||||
echo export PATH=/home/$USER_LOGIN/.krew/bin:\\\$PATH | sudo -u $USER_LOGIN tee -a /home/$USER_LOGIN/.bashrc
|
||||
fi"
|
||||
|
||||
# Install kubecolor
|
||||
KUBECOLOR_VERSION=0.4.0
|
||||
URL=https://github.com/kubecolor/kubecolor/releases/download/v${KUBECOLOR_VERSION}/kubecolor_${KUBECOLOR_VERSION}_linux_${ARCH}.tar.gz
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kubecolor ]; then
|
||||
##VERSION##
|
||||
curl -fsSL $URL |
|
||||
sudo tar -C /usr/local/bin -zx kubecolor
|
||||
fi"
|
||||
|
||||
# Install k9s
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/k9s ]; then
|
||||
FILENAME=k9s_Linux_$ARCH.tar.gz &&
|
||||
curl -fsSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
|
||||
sudo tar -C /usr/local/bin -zx k9s
|
||||
sudo tar -zxvf- -C /usr/local/bin k9s
|
||||
k9s version
|
||||
fi"
|
||||
|
||||
# Install popeye
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/popeye ]; then
|
||||
FILENAME=popeye_Linux_$ARCH.tar.gz &&
|
||||
FILENAME=popeye_Linux_$HERP_DERP_ARCH.tar.gz &&
|
||||
curl -fsSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
sudo tar -C /usr/local/bin -zx popeye
|
||||
sudo tar -zxvf- -C /usr/local/bin popeye
|
||||
popeye version
|
||||
fi"
|
||||
|
||||
@@ -802,10 +753,10 @@ EOF
|
||||
# But the install script is not arch-aware (see https://github.com/tilt-dev/tilt/pull/5050).
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/tilt ]; then
|
||||
TILT_VERSION=0.33.13
|
||||
TILT_VERSION=0.22.15
|
||||
FILENAME=tilt.\$TILT_VERSION.linux.$TILT_ARCH.tar.gz
|
||||
curl -fsSL https://github.com/tilt-dev/tilt/releases/download/v\$TILT_VERSION/\$FILENAME |
|
||||
sudo tar -C /usr/local/bin -zx tilt
|
||||
sudo tar -zxvf- -C /usr/local/bin tilt
|
||||
tilt completion bash | sudo tee /etc/bash_completion.d/tilt
|
||||
tilt version
|
||||
fi"
|
||||
@@ -847,8 +798,7 @@ EOF
|
||||
fi"
|
||||
|
||||
##VERSION## https://github.com/bitnami-labs/sealed-secrets/releases
|
||||
KUBESEAL_VERSION=0.26.2
|
||||
URL=https://github.com/bitnami-labs/sealed-secrets/releases/download/v${KUBESEAL_VERSION}/kubeseal-${KUBESEAL_VERSION}-linux-${ARCH}.tar.gz
|
||||
KUBESEAL_VERSION=0.17.4
|
||||
#case $ARCH in
|
||||
#amd64) FILENAME=kubeseal-linux-amd64;;
|
||||
#arm64) FILENAME=kubeseal-arm64;;
|
||||
@@ -856,13 +806,13 @@ EOF
|
||||
#esac
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kubeseal ]; then
|
||||
curl -fsSL $URL |
|
||||
sudo tar -C /usr/local/bin -zx kubeseal
|
||||
curl -fsSL https://github.com/bitnami-labs/sealed-secrets/releases/download/v$KUBESEAL_VERSION/kubeseal-$KUBESEAL_VERSION-linux-$ARCH.tar.gz |
|
||||
sudo tar -zxvf- -C /usr/local/bin kubeseal
|
||||
kubeseal --version
|
||||
fi"
|
||||
|
||||
##VERSION## https://github.com/vmware-tanzu/velero/releases
|
||||
VELERO_VERSION=1.13.2
|
||||
VELERO_VERSION=1.11.0
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/velero ]; then
|
||||
curl -fsSL https://github.com/vmware-tanzu/velero/releases/download/v$VELERO_VERSION/velero-v$VELERO_VERSION-linux-$ARCH.tar.gz |
|
||||
@@ -872,21 +822,13 @@ EOF
|
||||
fi"
|
||||
|
||||
##VERSION## https://github.com/doitintl/kube-no-trouble/releases
|
||||
KUBENT_VERSION=0.7.2
|
||||
KUBENT_VERSION=0.7.0
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/kubent ]; then
|
||||
curl -fsSL https://github.com/doitintl/kube-no-trouble/releases/download/${KUBENT_VERSION}/kubent-${KUBENT_VERSION}-linux-$ARCH.tar.gz |
|
||||
sudo tar -zxvf- -C /usr/local/bin kubent
|
||||
kubent --version
|
||||
fi"
|
||||
|
||||
# Ngrok. Note that unfortunately, this is the x86_64 binary.
|
||||
# We might have to rethink how to handle this for multi-arch environments.
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ngrok ]; then
|
||||
curl -fsSL https://bin.equinox.io/c/bNyj1mQVY4c/ngrok-v3-stable-linux-amd64.tgz |
|
||||
sudo tar -zxvf- -C /usr/local/bin ngrok
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
|
||||
@@ -1020,19 +962,12 @@ _cmd_standardize() {
|
||||
# Disable unattended upgrades so that they don't mess up with the subsequent steps
|
||||
pssh sudo rm -f /etc/apt/apt.conf.d/50unattended-upgrades
|
||||
|
||||
# Some cloud providers think that it's smart to disable password authentication.
|
||||
# We need to re-neable it, though.
|
||||
# Digital Ocecan
|
||||
# Digital Ocean's cloud init disables password authentication; re-enable it.
|
||||
pssh "
|
||||
if [ -f /etc/ssh/sshd_config.d/50-cloud-init.conf ]; then
|
||||
sudo rm /etc/ssh/sshd_config.d/50-cloud-init.conf
|
||||
sudo systemctl restart ssh.service
|
||||
fi"
|
||||
# AWS
|
||||
pssh "if [ -f /etc/ssh/sshd_config.d/60-cloudimg-settings.conf ]; then
|
||||
sudo rm /etc/ssh/sshd_config.d/60-cloudimg-settings.conf
|
||||
sudo systemctl restart ssh.service
|
||||
fi"
|
||||
|
||||
# Special case for oracle since their iptables blocks everything but SSH
|
||||
pssh "
|
||||
@@ -1068,12 +1003,11 @@ _cmd_tailhist () {
|
||||
# halfway through and we're actually trying to download it again.
|
||||
pssh "
|
||||
set -e
|
||||
sudo apt-get install unzip -y
|
||||
wget -c https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0-linux_$ARCH.zip
|
||||
unzip websocketd-0.3.0-linux_$ARCH.zip websocketd
|
||||
sudo mv websocketd /usr/local/bin/websocketd
|
||||
sudo mkdir -p /opt/tailhist
|
||||
sudo tee /opt/tailhist.service <<EOF
|
||||
sudo mkdir -p /tmp/tailhist
|
||||
sudo tee /root/tailhist.service <<EOF
|
||||
[Unit]
|
||||
Description=tailhist
|
||||
|
||||
@@ -1081,16 +1015,16 @@ Description=tailhist
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/opt/tailhist
|
||||
WorkingDirectory=/tmp/tailhist
|
||||
ExecStart=/usr/local/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/$USER_LOGIN/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
Restart=always
|
||||
EOF
|
||||
sudo systemctl enable /opt/tailhist.service --now
|
||||
sudo systemctl enable /root/tailhist.service --now
|
||||
"
|
||||
|
||||
pssh -I sudo tee /opt/tailhist/index.html <lib/tailhist.html
|
||||
pssh -I sudo tee /tmp/tailhist/index.html <lib/tailhist.html
|
||||
}
|
||||
|
||||
_cmd tools "Install a bunch of useful tools (editors, git, jq...)"
|
||||
|
||||
@@ -1,16 +0,0 @@
#!/bin/sh

DOMAINS=domains.txt
IPS=ips.txt

. ./dns-cloudflare.sh

paste "$DOMAINS" "$IPS" | while read domain ips; do
  if ! [ "$domain" ]; then
    echo "⚠️ No more domains!"
    exit 1
  fi
  _clear_zone "$domain"
  _populate_zone "$domain" $ips
done
echo "✅ All done."
@@ -7,7 +7,7 @@ USER_PASSWORD=training

# For a list of old versions, check:
# https://kubernetes.io/releases/patch-releases/#non-active-branch-history
KUBEVERSION=1.28.9
KUBEVERSION=1.24.14

STEPS="
wait
@@ -1,6 +0,0 @@
CLUSTERSIZE=5

USER_LOGIN=k8s
USER_PASSWORD=

STEPS="stage2"
@@ -1,7 +1,5 @@
#export TF_VAR_node_size=GP2.4
#export TF_VAR_node_size=g6-standard-6
#export TF_VAR_node_size=m7i.xlarge

CLUSTERSIZE=1

@@ -1,23 +1,10 @@
resource "scaleway_vpc_private_network" "_" {
}

# This is a kind of hack to use a custom security group with Kapsulse.
# See https://www.scaleway.com/en/docs/containers/kubernetes/reference-content/secure-cluster-with-private-network/

resource "scaleway_instance_security_group" "_" {
  name = "kubernetes ${split("/", scaleway_k8s_cluster._.id)[1]}"
  inbound_default_policy = "accept"
  outbound_default_policy = "accept"
}

resource "scaleway_k8s_cluster" "_" {
  name = var.cluster_name
  name = var.cluster_name
  #region = var.location
  tags = var.common_tags
  version = local.k8s_version
  type = "kapsule"
  cni = "cilium"
  delete_additional_resources = true
  private_network_id = scaleway_vpc_private_network._.id
}

resource "scaleway_k8s_pool" "_" {
@@ -30,7 +17,6 @@ resource "scaleway_k8s_pool" "_" {
  max_size = var.max_nodes_per_pool
  autoscaling = var.max_nodes_per_pool > var.min_nodes_per_pool
  autohealing = true
  depends_on = [ scaleway_instance_security_group._ ]
}

data "scaleway_k8s_version" "_" {
@@ -4,7 +4,6 @@ resource "helm_release" "_" {
|
||||
create_namespace = true
|
||||
repository = "https://charts.loft.sh"
|
||||
chart = "vcluster"
|
||||
version = "0.19.7"
|
||||
set {
|
||||
name = "service.type"
|
||||
value = "NodePort"
|
||||
|
||||
@@ -14,9 +14,9 @@ $ hcloud server-type list | grep shared
variable "node_sizes" {
  type = map(any)
  default = {
    S = "cpx11"
    M = "cpx21"
    L = "cpx31"
    S = "cx11"
    M = "cx21"
    L = "cx31"
  }
}

@@ -13,7 +13,7 @@ data "openstack_images_image_v2" "_" {
|
||||
most_recent = true
|
||||
properties = {
|
||||
os = "ubuntu"
|
||||
version = "24.04"
|
||||
version = "22.04"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
#/ /kube-halfday.yml.html 200!
#/ /kube-fullday.yml.html 200!
#/ /kube-twodays.yml.html 200!
/ /kube.yml.html 200!

# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
@@ -16,12 +17,10 @@

# Shortlinks for next training in English and French
#/next https://www.eventbrite.com/e/livestream-intensive-kubernetes-bootcamp-tickets-103262336428
/next https://qconsf.com/training/nov2024/asynchronous-architecture-patterns-scale-ml-and-other-high-latency-workloads
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
/hi5 https://enix.io/fr/services/formation/online/
/us https://www.ardanlabs.com/live-training-events/deploying-microservices-and-traditional-applications-with-kubernetes-march-28-2022.html
/uk https://skillsmatter.com/workshops/827-deploying-microservices-and-traditional-applications-with-kubernetes-with-jerome-petazzoni

# Survey form
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform

/ /kube.yml.html 200!
slides/autopilot/package-lock.json (786 lines, generated): file diff suppressed because it is too large.
@@ -2,8 +2,8 @@
"name": "container-training-pub-sub-server",
"version": "0.0.1",
"dependencies": {
"express": "^4.21.0",
"socket.io": "^4.7.5",
"socket.io-client": "^4.7.5"
"express": "^4.16.2",
"socket.io": "^4.6.1",
"socket.io-client": "^4.5.1"
}
}
@@ -1,4 +1,4 @@
## Exercise — Ingress Controller
## Exercise — Ingress

- Add an ingress controller to a Kubernetes cluster

@@ -1,4 +1,4 @@
# Exercise — Ingress Controller
# Exercise — Ingress

- We want to expose a couple of web apps through an ingress controller

@@ -128,4 +128,4 @@ This is similar to the previous scenario, but with two significant changes:

1. We only want to run the ingress controller on nodes that have the role `ingress`.

2. We want to either use `hostPort`, or a list of `externalIPs` (not `hostNetwork`).
2. We don't want to use `hostNetwork`, but a list of `externalIPs` instead.
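For reference, a minimal sketch of how constraint 1 and the `hostPort` variant of constraint 2 could look in the controller's pod template (the node label, container name, and image are illustrative assumptions, not the exercise solution):

```yaml
# Hypothetical fragment of an ingress controller DaemonSet pod template.
# Assumes the relevant nodes have been labeled "role=ingress" beforehand.
spec:
  template:
    spec:
      nodeSelector:
        role: ingress            # run only on nodes carrying the ingress role
      containers:
      - name: ingress-controller # illustrative name
        image: nginx             # placeholder image, not a real controller image
        ports:
        - containerPort: 80
          hostPort: 80           # publish port 80 on the node without hostNetwork
```

Nodes would be labeled beforehand, e.g. with `kubectl label node <node> role=ingress`.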
@@ -1,6 +1,6 @@
# Exercise — Network Policies

We want to implement a generic network security mechanism.
We want to to implement a generic network security mechanism.

Instead of creating one policy per service, we want to
create a fixed number of policies, and use a single label
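A sketch of what one such generic policy could look like, assuming access is granted by a single well-known label (the label name is an illustrative assumption):

```yaml
# Hypothetical "generic" policy: instead of one policy per service,
# allow ingress to every pod from clients carrying one agreed-upon label.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-labeled-clients
spec:
  podSelector: {}                  # selects every pod in the namespace
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          network-access: allowed  # the single label that grants access
```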
@@ -1,4 +1,4 @@
## Exercise — Enable RBAC
## Exercise — Enable RBAC on our custom cluster

- Enable RBAC on a manually-deployed control plane

@@ -1,4 +1,4 @@
# Exercise — Enable RBAC
# Exercise — Enable RBAC on our custom cluster

- We want to enable RBAC on the "polykube" cluster

@@ -1,7 +0,0 @@
## Exercise — Requests and Limits

- Check current resource allocation and utilization

- Make sure that all workloads have requests (and perhaps limits)

- Make sure that all *future* workloads too!
@@ -1,55 +0,0 @@
# Exercise — Requests and Limits

By default, if we don't specify *resource requests*,
our workloads will run in `BestEffort` quality of service.

`BestEffort` is very bad for production workloads,
because the scheduler has no idea of the actual resource
requirements of our apps, and won't be able to make
smart decisions about workload placement.

As a result, when the cluster gets overloaded,
containers will be killed, pods will be evicted,
and service disruptions will happen.

Let's solve this!

---

## Check current state

- Check *allocations*

  (i.e. which pods have requests and limits for CPU and memory)

- Then check *utilization*

  (i.e. actual resource usage)

- Possible tools: `kubectl`, plugins like `view-allocations`, Prometheus...

---

## Follow best practices

- We want to make sure that *all* workloads have requests

  (and perhaps limits, too!)

- Depending on the workload:

  - edit its YAML manifest

  - adjust its Helm values

  - add LimitRange in its Namespace

- Then check again to confirm that the job has been done properly!

---

## Be future-proof!

- We want to make sure that *future* workloads will have requests, too

- How can that be implemented?
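For reference, a sketch of the two mechanisms mentioned in that exercise: per-container requests and limits, and a namespace-wide LimitRange giving defaults to future workloads (all values are illustrative):

```yaml
# Per-container requests/limits, set in the manifest or via Helm values.
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    memory: 256Mi
---
# Namespace-wide defaults, so that future workloads get requests too.
apiVersion: v1
kind: LimitRange
metadata:
  name: default-requests
spec:
  limits:
  - type: Container
    defaultRequest:
      cpu: 100m
      memory: 128Mi
    default:
      memory: 256Mi
```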
@@ -1,4 +1,4 @@
# Exercise — Sealed Secrets
# Exercise — Sealed Secrets (and more RBAC!)

This is a "combo exercise" to practice the following concepts:

@@ -1,129 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<html>
|
||||
<head>
|
||||
<style>
|
||||
td {
|
||||
background: #ccc;
|
||||
padding: 1em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
|
||||
<tr>
|
||||
<td>Mardi 24 septembre 2024</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 25 septembre 2024</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 26 septembre 2024</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Vendredi 27 septembre 2024</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Mardi 1er octobre 2024</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 2 octobre 2024</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 3 octobre 2024</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Vendredi 4 octobre 2024</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Lundi 7 octobre 2024</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 8 octobre 2024</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 9 octobre 2024</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 10 octobre 2024</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Vendredi 11 octobre 2024</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lundi 14 octobre 2024</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 15 octobre 2024</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td>Mercredi 16 octobre 2024</td>
|
||||
<td>
|
||||
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 17 octobre 2024</td>
|
||||
<td>
|
||||
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Vendredi 18 octobre 2024</td>
|
||||
<td>
|
||||
<a href="3.yml.html">Packaging d'applications pour Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
Binary file not shown (before: 103 KiB).
Binary file not shown (before: 22 KiB).
@@ -1,16 +1,16 @@
|
||||
https://prettypictures.container.training/containers/Container-Ship-Freighter-Navigation-Elbe-Romance-1782991.jpg
|
||||
https://prettypictures.container.training/containers/ShippingContainerSFBay.jpg
|
||||
https://prettypictures.container.training/containers/aerial-view-of-containers.jpg
|
||||
https://prettypictures.container.training/containers/blue-containers.jpg
|
||||
https://prettypictures.container.training/containers/chinook-helicopter-container.jpg
|
||||
https://prettypictures.container.training/containers/container-cranes.jpg
|
||||
https://prettypictures.container.training/containers/container-housing.jpg
|
||||
https://prettypictures.container.training/containers/containers-by-the-water.jpg
|
||||
https://prettypictures.container.training/containers/distillery-containers.jpg
|
||||
https://prettypictures.container.training/containers/lots-of-containers.jpg
|
||||
https://prettypictures.container.training/containers/plastic-containers.JPG
|
||||
https://prettypictures.container.training/containers/train-of-containers-1.jpg
|
||||
https://prettypictures.container.training/containers/train-of-containers-2.jpg
|
||||
https://prettypictures.container.training/containers/two-containers-on-a-truck.jpg
|
||||
https://prettypictures.container.training/containers/wall-of-containers.jpeg
|
||||
https://prettypictures.container.training/containers/catene-de-conteneurs.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/Container-Ship-Freighter-Navigation-Elbe-Romance-1782991.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/ShippingContainerSFBay.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/aerial-view-of-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/blue-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/chinook-helicopter-container.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/container-cranes.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/container-housing.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/containers-by-the-water.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/distillery-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/lots-of-containers.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/plastic-containers.JPG
|
||||
https://gallant-turing-d0d520.netlify.com/containers/train-of-containers-1.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/train-of-containers-2.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/two-containers-on-a-truck.jpg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/wall-of-containers.jpeg
|
||||
https://gallant-turing-d0d520.netlify.com/containers/catene-de-conteneurs.jpg
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Container_Network_Model.md
|
||||
- shared/yaml.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Advanced_Dockerfiles.md
|
||||
#- containers/Buildkit.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,73 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- shared/yaml.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Buildkit.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Pods_Anatomy.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,81 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
-
|
||||
- containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Start_And_Attach.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Resource_Limits.md
|
||||
- # DAY 2
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
-
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- shared/yaml.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Installing_Docker.md
|
||||
- containers/Container_Engines.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Buildkit.md
|
||||
-
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Orchestration_Overview.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
#-
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Ambassadors.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
@@ -20,21 +20,19 @@

## Use cases

- Defaulting
Some examples ...

*injecting image pull secrets, sidecars, environment variables...*
- Stand-alone admission controllers

- Policy enforcement and best practices
*validating:* policy enforcement (e.g. quotas, naming conventions ...)

*prevent: `latest` images, deprecated APIs...*
*mutating:* inject or provide default values (e.g. pod presets)

*require: PDBs, resource requests/limits, labels/annotations, local registry...*
- Admission controllers part of a greater system

- Problem mitigation
*validating:* advanced typing for operators

*block nodes with vulnerable kernels, inject log4j mitigations...*

- Extended validation for operators
*mutating:* inject sidecars for service meshes
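As a reminder of the mechanism behind these use cases, admission webhooks are registered with the API server through a ValidatingWebhookConfiguration or MutatingWebhookConfiguration; a minimal sketch (the names, namespace, and rule below are illustrative assumptions):

```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: example-policy-webhook
webhooks:
- name: policy.example.com
  admissionReviewVersions: ["v1"]
  sideEffects: None
  rules:
  - apiGroups: [""]
    apiVersions: ["v1"]
    operations: ["CREATE"]
    resources: ["pods"]
  clientConfig:
    service:
      namespace: webhooks        # assumed namespace of the webhook Service
      name: policy-webhook       # assumed Service name
      path: /validate
    # caBundle would normally be set here as well, so the API server trusts the webhook's certificate
```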
---
|
||||
|
||||
@@ -200,64 +198,6 @@
|
||||
|
||||
(the Node "echo" app, the Flask app, and one ngrok tunnel for each of them)
|
||||
|
||||
- We will need an ngrok account for the tunnels
|
||||
|
||||
(a free account is fine)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's ngrok?
|
||||
|
||||
- Ngrok provides secure tunnels to access local services
|
||||
|
||||
- Example: run `ngrok http 1234`
|
||||
|
||||
- `ngrok` will display a publicly-available URL (e.g. https://xxxxyyyyzzzz.ngrok.app)
|
||||
|
||||
- Connections to https://xxxxyyyyzzzz.ngrok.app will terminate at `localhost:1234`
|
||||
|
||||
- Basic product is free; extra features (vanity domains, end-to-end TLS...) for $$$
|
||||
|
||||
- Perfect to develop our webhook!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Ngrok in production
|
||||
|
||||
- Ngrok was initially known for its local webhook development features
|
||||
|
||||
- It now supports production scenarios as well
|
||||
|
||||
(load balancing, WAF, authentication, circuit-breaking...)
|
||||
|
||||
- Including some that are very relevant to Kubernetes
|
||||
|
||||
(e.g. [ngrok Ingress Controller](https://github.com/ngrok/kubernetes-ingress-controller)
|
||||
|
||||
---
|
||||
|
||||
## Ngrok tokens
|
||||
|
||||
- If you're attending a live training, you might have an ngrok token
|
||||
|
||||
- Look in `~/ngrok.env` and if that file exists, copy it to the stack:
|
||||
|
||||
.lab[
|
||||
|
||||
```bash
|
||||
cp ~/ngrok.env ~/container.training/webhooks/admission/.env
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Starting the whole stack
|
||||
|
||||
.lab[
|
||||
|
||||
- Go to the webhook directory:
|
||||
@@ -276,6 +216,28 @@ cp ~/ngrok.env ~/container.training/webhooks/admission/.env
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's ngrok?
|
||||
|
||||
- Ngrok provides secure tunnels to access local services
|
||||
|
||||
- Example: run `ngrok http 1234`
|
||||
|
||||
- `ngrok` will display a publicly-available URL (e.g. https://xxxxyyyyzzzz.ngrok.io)
|
||||
|
||||
- Connections to https://xxxxyyyyzzzz.ngrok.io will terminate at `localhost:1234`
|
||||
|
||||
- Basic product is free; extra features (vanity domains, end-to-end TLS...) for $$$
|
||||
|
||||
- Perfect to develop our webhook!
|
||||
|
||||
- Probably not for production, though
|
||||
|
||||
(webhook requests and responses now pass through the ngrok platform)
|
||||
|
||||
---
|
||||
|
||||
## Update the webhook configuration
|
||||
|
||||
- We have a webhook configuration in `k8s/webhook-configuration.yaml`
|
||||
|
||||
@@ -141,6 +141,12 @@ class: pic
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
@@ -151,12 +157,6 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
# The Kubernetes API
|
||||
|
||||
[
|
||||
|
||||
@@ -1,601 +0,0 @@
|
||||
# ArgoCD
|
||||
|
||||
- We're going to implement a basic GitOps workflow with ArgoCD
|
||||
|
||||
- Pushing to the default branch will automatically deploy to our clusters
|
||||
|
||||
- There will be two clusters (`dev` and `prod`)
|
||||
|
||||
- The two clusters will have similar (but slightly different) workloads
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## ArgoCD concepts
|
||||
|
||||
ArgoCD manages **applications** by **syncing** their **live state** with their **target state**.
|
||||
|
||||
- **Application**: a group of Kubernetes resources managed by ArgoCD.
|
||||
<br/>
|
||||
Also a custom resource (`kind: Application`) managing that group of resources.
|
||||
|
||||
- **Application source type**: the **Tool** used to build the application (Kustomize, Helm...)
|
||||
|
||||
- **Target state**: the desired state of an **application**, as represented by the git repository.
|
||||
|
||||
- **Live state**: the current state of the application on the cluster.
|
||||
|
||||
- **Sync status**: whether or not the live state matches the target state.
|
||||
|
||||
- **Sync**: the process of making an application move to its target state.
|
||||
<br/>
|
||||
(e.g. by applying changes to a Kubernetes cluster)
|
||||
|
||||
(Check [ArgoCD core concepts](https://argo-cd.readthedocs.io/en/stable/core_concepts/) for more definitions!)
|
||||
|
||||
---
|
||||
|
||||
## Getting ready
|
||||
|
||||
- Let's make sure we have two clusters
|
||||
|
||||
- It's OK to use local clusters (kind, minikube...)
|
||||
|
||||
- We need to install the ArgoCD CLI ([packages], [binaries])
|
||||
|
||||
- **Highly recommended:** set up CLI completion!
|
||||
|
||||
- Of course we'll need a Git service, too
|
||||
|
||||
[packages]: https://argo-cd.readthedocs.io/en/stable/cli_installation/
|
||||
[binaries]: https://github.com/argoproj/argo-cd/releases/latest
|
||||
|
||||
---
|
||||
|
||||
## Setting up ArgoCD
|
||||
|
||||
- The easiest way is to use upstream YAML manifests
|
||||
|
||||
- There is also a [Helm chart][argohelmchart] if we need more customization
|
||||
|
||||
.lab[
|
||||
|
||||
- Create a namespace for ArgoCD and install it there:
|
||||
```bash
|
||||
kubectl create namespace argocd
|
||||
kubectl apply --namespace argocd -f \
|
||||
https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
[argohelmchart]: https://artifacthub.io/packages/helm/argo/argocd-apps
|
||||
|
||||
---
|
||||
|
||||
## Logging in with the ArgoCD CLI
|
||||
|
||||
- The CLI can talk to the ArgoCD API server or to the Kubernetes API server
|
||||
|
||||
- For simplicity, we're going to authenticate and communicate with the Kubernetes API
|
||||
|
||||
.lab[
|
||||
|
||||
- Authenticate with the ArgoCD API (that's what the `--core` flag does):
|
||||
```bash
|
||||
argocd login --core
|
||||
```
|
||||
|
||||
- Check that everything is fine:
|
||||
```bash
|
||||
argocd version
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
🤔 `FATA[0000] error retrieving argocd-cm: configmap "argocd-cm" not found`
|
||||
|
||||
---
|
||||
|
||||
## ArgoCD CLI shortcomings
|
||||
|
||||
- When using "core" authentication, the ArgoCD CLI uses our current Kubernetes context
|
||||
|
||||
(as defined in our kubeconfig file)
|
||||
|
||||
- That context need to point to the correct namespace
|
||||
|
||||
(the namespace where we installed ArgoCD)
|
||||
|
||||
- In fact, `argocd login --core` doesn't communicate at all with ArgoCD!
|
||||
|
||||
(it only updates a local ArgoCD configuration file)
|
||||
|
||||
---
|
||||
|
||||
## Trying again in the right namespace
|
||||
|
||||
- We will need to run all `argocd` commands in the `argocd` namespace
|
||||
|
||||
(this limitation only applies to "core" authentication; see [issue 14167][issue14167])
|
||||
|
||||
.lab[
|
||||
|
||||
- Switch to the `argocd` namespace:
|
||||
```bash
|
||||
kubectl config set-context --current --namespace argocd
|
||||
```
|
||||
|
||||
- Check that we can communicate with the ArgoCD API now:
|
||||
```bash
|
||||
argocd version
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- Let's have a look at ArgoCD architecture!
|
||||
|
||||
[issue14167]: https://github.com/argoproj/argo-cd/issues/14167
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## ArgoCD API Server
|
||||
|
||||
The API server is a gRPC/REST server which exposes the API consumed by the Web UI, CLI, and CI/CD systems. It has the following responsibilities:
|
||||
|
||||
- application management and status reporting
|
||||
|
||||
- invoking of application operations (e.g. sync, rollback, user-defined actions)
|
||||
|
||||
- repository and cluster credential management (stored as K8s secrets)
|
||||
|
||||
- authentication and auth delegation to external identity providers
|
||||
|
||||
- RBAC enforcement
|
||||
|
||||
- listener/forwarder for Git webhook events
|
||||
|
||||
---
|
||||
|
||||
## ArgoCD Repository Server
|
||||
|
||||
The repository server is an internal service which maintains a local cache of the Git repositories holding the application manifests. It is responsible for generating and returning the Kubernetes manifests when provided the following inputs:
|
||||
|
||||
- repository URL
|
||||
|
||||
- revision (commit, tag, branch)
|
||||
|
||||
- application path
|
||||
|
||||
- template specific settings: parameters, helm values...
|
||||
|
||||
---
|
||||
|
||||
## ArgoCD Application Controller
|
||||
|
||||
The application controller is a Kubernetes controller which continuously monitors running applications and compares the current, live state against the desired target state (as specified in the repo).
|
||||
|
||||
It detects *OutOfSync* application state and optionally takes corrective action.
|
||||
|
||||
It is responsible for invoking any user-defined hooks for lifecycle events (*PreSync, Sync, PostSync*).
|
||||
|
||||
---
|
||||
|
||||
## Preparing a repository for ArgoCD
|
||||
|
||||
- We need a repository with Kubernetes YAML manifests
|
||||
|
||||
- You can fork [kubercoins] or create a new, empty repository
|
||||
|
||||
- If you create a new, empty repository, add some manifests to it
|
||||
|
||||
[kubercoins]: https://github.com/jpetazzo/kubercoins
|
||||
|
||||
---
|
||||
|
||||
## Add an Application
|
||||
|
||||
- An Application can be added to ArgoCD via the web UI or the CLI
|
||||
|
||||
(either way, this will create a custom resource of `kind: Application`)
|
||||
|
||||
- The Application should then automatically be deployed to our cluster
|
||||
|
||||
(the application manifests will be "applied" to the cluster)
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's use the CLI to add an Application:
|
||||
```bash
|
||||
argocd app create kubercoins \
|
||||
--repo https://github.com/`<your_user>/<your_repo>`.git \
|
||||
--path . --revision `<branch>` \
|
||||
--dest-server https://kubernetes.default.svc \
|
||||
--dest-namespace kubercoins-prod
|
||||
```
|
||||
|
||||
]
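The CLI command above creates a custom resource; an equivalent `Application` manifest would look roughly like this (a sketch, with the repository URL, branch, and namespace kept as placeholders):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kubercoins
  namespace: argocd            # the namespace where ArgoCD is installed
spec:
  project: default
  source:
    repoURL: https://github.com/<your_user>/<your_repo>.git
    targetRevision: <branch>
    path: .
  destination:
    server: https://kubernetes.default.svc
    namespace: kubercoins-prod
```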
|
||||
|
||||
---
|
||||
|
||||
## Checking progress
|
||||
|
||||
- We can see sync status in the web UI or with the CLI
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's check app status with the CLI:
|
||||
```bash
|
||||
argocd app list
|
||||
```
|
||||
|
||||
- We can also check directly with the Kubernetes CLI:
|
||||
```bash
|
||||
kubectl get applications
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The app is there and it is `OutOfSync`!
|
||||
|
||||
---
|
||||
|
||||
## Manual sync with the CLI
|
||||
|
||||
- By default the "sync policy" is `manual`
|
||||
|
||||
- It can also be set to `auto`, which would check the git repository every 3 minutes
|
||||
|
||||
(this interval can be [configured globally][pollinginterval])
|
||||
|
||||
- Manual sync can be triggered with the CLI
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's force an immediate sync of our app:
|
||||
```bash
|
||||
argocd app sync kubercoins
|
||||
```
|
||||
]
|
||||
|
||||
🤔 We're getting errors!
|
||||
|
||||
[pollinginterval]: https://argo-cd.readthedocs.io/en/stable/faq/#how-often-does-argo-cd-check-for-changes-to-my-git-or-helm-repository
|
||||
|
||||
---
|
||||
|
||||
## Sync failed
|
||||
|
||||
We should receive a failure:
|
||||
|
||||
`FATA[0000] Operation has completed with phase: Failed`
|
||||
|
||||
And in the output, we see more details:
|
||||
|
||||
`Message: one or more objects failed to apply,`
|
||||
<br/>
|
||||
`reason: namespaces "kubercoins-prod" not found`
|
||||
|
||||
---
|
||||
|
||||
## Creating the namespace
|
||||
|
||||
- There are multiple ways to achieve that
|
||||
|
||||
- We could generate a YAML manifest for the namespace and add it to the git repository
|
||||
|
||||
- Or we could use "Sync Options" so that ArgoCD creates it automatically!
|
||||
|
||||
- ArgoCD provides many "Sync Options" to handle various edge cases
|
||||
|
||||
- Some [others](https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/) are: `FailOnSharedResource`, `PruneLast`, `PrunePropagationPolicy`...
|
||||
|
||||
---
|
||||
|
||||
## Editing the app's sync options
|
||||
|
||||
- This can be done through the web UI or the CLI
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's use the CLI once again:
|
||||
```bash
|
||||
argocd app edit kubercoins
|
||||
```
|
||||
|
||||
- Add the following to the YAML manifest, at the root level:
|
||||
```yaml
|
||||
syncPolicy:
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Sync again
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's retry the sync operation:
|
||||
```bash
|
||||
argocd app sync kubercoins
|
||||
```
|
||||
|
||||
- And check the application status:
|
||||
```bash
|
||||
argocd app list
|
||||
kubectl get applications
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- It should show `Synced` and `Progressing`
|
||||
|
||||
- After a while (when all pods are running correctly) it should be `Healthy`
|
||||
|
||||
---
|
||||
|
||||
## Managing Applications via the Web UI
|
||||
|
||||
- ArgoCD is popular in large part due to its browser-based UI
|
||||
|
||||
- Let's see how to manage Applications in the web UI
|
||||
|
||||
.lab[
|
||||
|
||||
- Expose the web dashboard on a local port:
|
||||
```bash
|
||||
argocd admin dashboard
|
||||
```
|
||||
|
||||
- This command will show the dashboard URL; open it in a browser
|
||||
|
||||
- Authentication should be automatic
|
||||
|
||||
]
|
||||
|
||||
Note: `argocd admin dashboard` is similar to `kubectl port-forward` or `kubectl-proxy`.
|
||||
|
||||
(The dashboard remains available as long as `argocd admin dashboard` is running.)
|
||||
|
||||
---
|
||||
|
||||
## Adding a staging Application
|
||||
|
||||
- Let's add another Application for a staging environment
|
||||
|
||||
- First, create a new branch (e.g. `staging`) in our kubercoins fork
|
||||
|
||||
- Then, in the ArgoCD web UI, click on the "+ NEW APP" button
|
||||
|
||||
(on a narrow display, it might just be "+", right next to buttons looking like 🔄 and ↩️)
|
||||
|
||||
- See next slides for details about that form!
|
||||
|
||||
---
|
||||
|
||||
## Defining the Application
|
||||
|
||||
| Field | Value |
|
||||
|------------------|--------------------------------------------|
|
||||
| Application Name | `kubercoins-stg` |
|
||||
| Project Name | `default` |
|
||||
| Sync policy | `Manual` |
|
||||
| Sync options | check `auto-create namespace` |
|
||||
| Repository URL | `https://github.com/<username>/<reponame>` |
|
||||
| Revision | `<branchname>` |
|
||||
| Path | `.` |
|
||||
| Cluster URL | `https://kubernetes.default.svc` |
|
||||
| Namespace | `kubercoins-stg` |
|
||||
|
||||
Then click on the "CREATE" button (top left).
|
||||
|
||||
---
|
||||
|
||||
## Synchronizing the Application
|
||||
|
||||
- After creating the app, it should now show up in the app tiles
|
||||
|
||||
(with a yellow outline to indicate that it's out of sync)
|
||||
|
||||
- Click on the "SYNC" button on the app tile to show the sync panel
|
||||
|
||||
- In the sync panel, click on "SYNCHRONIZE"
|
||||
|
||||
- The app will start to synchronize, and should become healthy after a little while
|
||||
|
||||
---
|
||||
|
||||
## Making changes
|
||||
|
||||
- Let's make changes to our application manifests and see what happens
|
||||
|
||||
.lab[
|
||||
|
||||
- Make a change to a manifest
|
||||
|
||||
(for instance, change the number of replicas of a Deployment)
|
||||
|
||||
- Commit that change and push it to the staging branch
|
||||
|
||||
- Check the application sync status:
|
||||
```bash
|
||||
argocd app list
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- After a short period of time (a few minutes max) the app should show up "out of sync"
|
||||
|
||||
---
|
||||
|
||||
## Automated synchronization
|
||||
|
||||
- We don't want to manually sync after every change
|
||||
|
||||
(that wouldn't be true continuous deployment!)
|
||||
|
||||
- We're going to enable "auto sync"
|
||||
|
||||
- Note that this requires much more rigorous testing and observability!
|
||||
|
||||
(we need to be sure that our changes won't crash our app or even our cluster)
|
||||
|
||||
- Argo project also provides [Argo Rollouts][rollouts]
|
||||
|
||||
(a controller and CRDs to provide blue-green, canary deployments...)
|
||||
|
||||
- Today we'll just turn on automated sync for the staging namespace
|
||||
|
||||
[rollouts]: https://argoproj.github.io/rollouts/
|
||||
|
||||
---
|
||||
|
||||
## Enabling auto-sync
|
||||
|
||||
- In the web UI, go to *Applications* and click on *kubercoins-stg*
|
||||
|
||||
- Click on the "DETAILS" button (top left, might be just a "i" sign on narrow displays)
|
||||
|
||||
- Click on "ENABLE AUTO-SYNC" (under "SYNC POLICY")
|
||||
|
||||
- After a few minutes the changes should show up!
|
||||
|
||||
---
|
||||
|
||||
## Rolling back
|
||||
|
||||
- If we deploy a broken version, how do we recover?
|
||||
|
||||
- "The GitOps way": revert the changes in source control
|
||||
|
||||
(see next slide)
|
||||
|
||||
- Emergency rollback:
|
||||
|
||||
- disable auto-sync (if it was enabled)
|
||||
|
||||
- on the app page, click on "HISTORY AND ROLLBACK"
|
||||
<br/>
|
||||
(with the clock-with-backward-arrow icon)
|
||||
|
||||
- click on the "..." button next to the button we want to roll back to
|
||||
|
||||
- click "Rollback" and confirm
|
||||
|
||||
---
|
||||
|
||||
## Rolling back with GitOps
|
||||
|
||||
- The correct way to roll back is rolling back the code in source control
|
||||
|
||||
```bash
|
||||
git checkout staging
|
||||
git revert HEAD
|
||||
git push origin staging
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Working with Helm
|
||||
|
||||
- ArgoCD supports different tools to process Kubernetes manifests:
|
||||
|
||||
Kustomize, Helm, Jsonnet, and [Config Management Plugins][cmp]
|
||||
|
||||
- Let's see how to deploy Helm charts with ArgoCD!
|
||||
|
||||
- In the [kubercoins] repository, there is a branch called [helm]
|
||||
|
||||
- It provides a generic Helm chart, in the [generic-service] directory
|
||||
|
||||
- There are service-specific values YAML files in the [values] directory
|
||||
|
||||
- Let's create one application for each of the 5 components of our app!
|
||||
|
||||
[cmp]: https://argo-cd.readthedocs.io/en/stable/operator-manual/config-management-plugins/
|
||||
[kubercoins]: https://github.com/jpetazzo/kubercoins
|
||||
[helm]: https://github.com/jpetazzo/kubercoins/tree/helm
|
||||
[generic-service]: https://github.com/jpetazzo/kubercoins/tree/helm/generic-service
|
||||
[values]: https://github.com/jpetazzo/kubercoins/tree/helm/values
|
||||
|
||||
---
|
||||
|
||||
## Creating a Helm Application
|
||||
|
||||
- The example below uses "upstream" kubercoins
|
||||
|
||||
- Feel free to use your own fork instead!
|
||||
|
||||
.lab[
|
||||
|
||||
- Create an Application for `hasher`:
|
||||
```bash
|
||||
argocd app create hasher \
|
||||
--repo https://github.com/jpetazzo/kubercoins.git \
|
||||
--path generic-service --revision helm \
|
||||
--dest-server https://kubernetes.default.svc \
|
||||
--dest-namespace kubercoins-helm \
|
||||
--sync-option CreateNamespace=true \
|
||||
--values ../values/hasher.yaml \
|
||||
--sync-policy=auto
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the rest of the application
|
||||
|
||||
- Option 1: repeat the previous command (updating app name and values)
|
||||
|
||||
- Option 2: author YAML manifests and apply them
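
For option 2, a minimal sketch of such a manifest could look like this (it mirrors the `argocd app create` flags used for `hasher`; the `redis` name and value file are just an example for another component):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: redis                      # one Application per component
  namespace: argocd                # namespace where ArgoCD runs
spec:
  project: default
  source:
    repoURL: https://github.com/jpetazzo/kubercoins.git
    targetRevision: helm
    path: generic-service
    helm:
      valueFiles:
        - ../values/redis.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: kubercoins-helm
  syncPolicy:
    automated: {}
    syncOptions:
      - CreateNamespace=true
```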
|
||||
|
||||
---
|
||||
|
||||
## Additional considerations
|
||||
|
||||
- When running in production, ArgoCD can be integrated with an [SSO provider][sso]
|
||||
|
||||
- ArgoCD embeds and bundles [Dex] to delegate authentication
|
||||
|
||||
- it can also use an existing OIDC provider (Okta, Keycloak...)
|
||||
|
||||
- A single ArgoCD instance can manage multiple clusters
|
||||
|
||||
(but it's also fine to have one ArgoCD per cluster)
|
||||
|
||||
- ArgoCD can be complemented with [Argo Rollouts][rollouts] for advanced rollout control
|
||||
|
||||
(blue/green, canary...)
|
||||
|
||||
[sso]: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sso
|
||||
[Dex]: https://github.com/dexidp/dex
|
||||
[rollouts]: https://argoproj.github.io/argo-rollouts/
|
||||
|
||||
---
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
Many thanks to
|
||||
Anton (Ant) Weiss ([antweiss.com](https://antweiss.com), [@antweiss](https://twitter.com/antweiss))
|
||||
and
|
||||
Guilhem Lettron
|
||||
for contributing an initial version and suggestions to this ArgoCD chapter.
|
||||
|
||||
All remaining typos, mistakes, or approximations are mine (Jérôme Petazzoni).
|
||||
|
||||
???
|
||||
|
||||
:EN:- Implementing gitops with ArgoCD
|
||||
:FR:- Workflow gitops avec ArgoCD
|
||||
@@ -81,7 +81,7 @@
|
||||
|
||||
## What version are we running anyway?
|
||||
|
||||
- When I say, "I'm running Kubernetes 1.28", is that the version of:
|
||||
- When I say, "I'm running Kubernetes 1.22", is that the version of:
|
||||
|
||||
- kubectl
|
||||
|
||||
@@ -129,15 +129,15 @@
|
||||
|
||||
## Kubernetes uses semantic versioning
|
||||
|
||||
- Kubernetes versions look like MAJOR.MINOR.PATCH; e.g. in 1.28.9:
|
||||
- Kubernetes versions look like MAJOR.MINOR.PATCH; e.g. in 1.22.17:
|
||||
|
||||
- MAJOR = 1
|
||||
- MINOR = 28
|
||||
- PATCH = 9
|
||||
- MINOR = 22
|
||||
- PATCH = 17
|
||||
|
||||
- It's always possible to mix and match different PATCH releases
|
||||
|
||||
(e.g. 1.28.9 and 1.28.13 are compatible)
|
||||
(e.g. 1.22.17 and 1.22.5 are compatible)
|
||||
|
||||
- It is recommended to run the latest PATCH release
|
||||
|
||||
@@ -153,9 +153,9 @@
|
||||
|
||||
- All components support a difference of one¹ MINOR version
|
||||
|
||||
- This allows live upgrades (since we can mix e.g. 1.28 and 1.29)
|
||||
- This allows live upgrades (since we can mix e.g. 1.22 and 1.23)
|
||||
|
||||
- It also means that going from 1.28 to 1.30 requires going through 1.29
|
||||
- It also means that going from 1.22 to 1.24 requires going through 1.23
|
||||
|
||||
.footnote[¹Except kubelet, which can be up to two MINOR behind API server,
|
||||
and kubectl, which can be one MINOR ahead or behind API server.]
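
A quick way to see which versions are actually involved on a given cluster (output formats vary a bit across releases):

```bash
# Client and API server versions
kubectl version

# kubelet version of each node (VERSION column)
kubectl get nodes

# kubeadm version on a control plane node
kubeadm version
```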
|
||||
@@ -254,7 +254,7 @@ and kubectl, which can be one MINOR ahead or behind API server.]
|
||||
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
|
||||
```
|
||||
|
||||
- Look for the `image:` line, and update it to e.g. `v1.30.1`
|
||||
- Look for the `image:` line, and update it to e.g. `v1.24.1`
|
||||
|
||||
]
|
||||
|
||||
@@ -320,29 +320,53 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
- First things first: we need to upgrade kubeadm
|
||||
|
||||
- The Kubernetes package repositories are now split by minor versions
|
||||
.lab[
|
||||
|
||||
(i.e. there is one repository for 1.28, another for 1.29, etc.)
|
||||
- Upgrade kubeadm:
|
||||
```
|
||||
sudo apt install kubeadm=1.27.0-00
|
||||
```
|
||||
|
||||
- This avoids accidentally upgrading from one minor version to another
|
||||
- Check what kubeadm tells us:
|
||||
```
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
(e.g. with unattended upgrades or if packages haven't been held/pinned)
|
||||
]
|
||||
|
||||
- We'll need to add the new package repository and unpin packages!
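
For reference, a per-minor-version entry in `/etc/apt/sources.list.d/kubernetes.list` typically looks like the line below (shown for 1.29; treat the exact URL and keyring path as an assumption and check pkgs.k8s.io for your setup):

```
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /
```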
|
||||
Problem: kubeadm doesn't know how to handle
|
||||
upgrades from version 1.22.
|
||||
|
||||
This is because we installed version 1.27.
|
||||
|
||||
We need to install kubeadm version 1.23.X.
|
||||
|
||||
---
|
||||
|
||||
## Installing the new packages
|
||||
## Downgrading kubeadm
|
||||
|
||||
- Edit `/etc/apt/sources.list.d/kubernetes.list`
|
||||
- We need to go back to kubeadm version 1.23.X.
|
||||
|
||||
(or copy it to e.g. `kubernetes-1.29.list` and edit that)
|
||||
.lab[
|
||||
|
||||
- `apt-get update`
|
||||
- View available versions for package `kubeadm`:
|
||||
```bash
|
||||
apt show kubeadm -a | grep ^Version | grep 1.23
|
||||
```
|
||||
|
||||
- Now edit (or remove) `/etc/apt/preferences.d/kubernetes`
|
||||
- Downgrade kubeadm:
|
||||
```
|
||||
sudo apt install kubeadm=1.23.0-00
|
||||
```
|
||||
|
||||
- `apt-get install kubeadm` should now upgrade `kubeadm` correctly! 🎉
|
||||
- Check what kubeadm tells us:
|
||||
```
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
kubeadm should now agree to upgrade to 1.23.X.
|
||||
|
||||
---
|
||||
|
||||
@@ -361,7 +385,7 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
- Look for the `image:` line, and restore it to the original value
|
||||
|
||||
(e.g. `v1.28.9`)
|
||||
(e.g. `v1.22.17`)
|
||||
|
||||
- Wait for the control plane to come back up
|
||||
|
||||
@@ -375,14 +399,9 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
.lab[
|
||||
|
||||
- Check the upgrade plan:
|
||||
```bash
|
||||
sudo kubeadm upgrade plan
|
||||
```
|
||||
|
||||
- Perform the upgrade:
|
||||
```bash
|
||||
sudo kubeadm upgrade apply v1.29.0
|
||||
sudo kubeadm upgrade apply v1.23.0
|
||||
```
|
||||
|
||||
]
|
||||
@@ -399,9 +418,15 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
- Log into node `oldversion2`
|
||||
|
||||
- Update package lists and APT pins like we did before
|
||||
- View available versions for package `kubelet`:
|
||||
```bash
|
||||
apt show kubelet -a | grep ^Version
|
||||
```
|
||||
|
||||
- Then upgrade kubelet
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
sudo apt install kubelet=1.23.0-00
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -454,16 +479,13 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
.lab[
|
||||
|
||||
- Execute the whole upgrade procedure on each node:
|
||||
- Download the configuration on each node, and upgrade kubelet:
|
||||
```bash
|
||||
for N in 1 2 3; do
|
||||
ssh oldversion$N "
|
||||
sudo sed -i s/1.28/1.29/ /etc/apt/sources.list.d/kubernetes.list &&
|
||||
sudo rm /etc/apt/preferences.d/kubernetes &&
|
||||
sudo apt update &&
|
||||
sudo apt install kubeadm -y &&
|
||||
sudo apt install kubeadm=1.23.0-00 &&
|
||||
sudo kubeadm upgrade node &&
|
||||
sudo apt install kubelet -y"
|
||||
sudo apt install kubelet=1.23.0-00"
|
||||
done
|
||||
```
|
||||
]
|
||||
@@ -472,7 +494,7 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- All our nodes should now be updated to version 1.29
|
||||
- All our nodes should now be updated to version 1.23.0
|
||||
|
||||
.lab[
|
||||
|
||||
@@ -565,35 +587,17 @@ Note 2: kubeadm itself is still version 1.22.1..
|
||||
|
||||
---
|
||||
|
||||
## Database operators to the rescue
|
||||
|
||||
- Moving stateful pods (e.g.: database server) can cause downtime
|
||||
|
||||
- Database replication can help:
|
||||
|
||||
- if a node contains database servers, we make sure these servers aren't primaries
|
||||
|
||||
- if they are primaries, we execute a *switch over*
|
||||
|
||||
- Some database operators (e.g. [CNPG]) will do that switch over automatically
|
||||
|
||||
(when they detect that a node has been *cordoned*)
|
||||
|
||||
[CNPG]: https://cloudnative-pg.io/
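
As a reminder, the cordon/drain sequence that such operators react to looks like this (the node name is just an example):

```bash
# Mark the node as unschedulable (this is what the operator watches for)
kubectl cordon oldversion2

# Evict the pods running on that node (DaemonSet pods are left in place)
kubectl drain oldversion2 --ignore-daemonsets --delete-emptydir-data

# Once maintenance is over, make the node schedulable again
kubectl uncordon oldversion2
```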
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Skipping versions
|
||||
|
||||
- This example worked because we went from 1.28 to 1.29
|
||||
- This example worked because we went from 1.22 to 1.23
|
||||
|
||||
- If you are upgrading from e.g. 1.26, you will have to go through 1.27 first
|
||||
- If you are upgrading from e.g. 1.21, you will have to go through 1.22 first
|
||||
|
||||
- This means upgrading kubeadm to 1.27.X, then using it to upgrade the cluster
|
||||
- This means upgrading kubeadm to 1.22.X, then using it to upgrade the cluster
|
||||
|
||||
- Then upgrading kubeadm to 1.28.X, etc.
|
||||
- Then upgrading kubeadm to 1.23.X, etc.
|
||||
|
||||
- **Make sure to read the release notes before upgrading!**
|
||||
|
||||
|
||||
@@ -24,32 +24,6 @@
|
||||
|
||||
---
|
||||
|
||||
## A bit of history
|
||||
|
||||
Things related to Custom Resource Definitions:
|
||||
|
||||
- Kubernetes 1.??: `apiextensions.k8s.io/v1beta1` introduced
|
||||
|
||||
- Kubernetes 1.16: `apiextensions.k8s.io/v1` introduced
|
||||
|
||||
- Kubernetes 1.22: `apiextensions.k8s.io/v1beta1` [removed][changes-in-122]
|
||||
|
||||
- Kubernetes 1.25: [CEL validation rules available in beta][crd-validation-rules-beta]
|
||||
|
||||
- Kubernetes 1.28: [validation ratcheting][validation-ratcheting] in [alpha][feature-gates]
|
||||
|
||||
- Kubernetes 1.29: [CEL validation rules available in GA][cel-validation-rules]
|
||||
|
||||
- Kubernetes 1.30: [validation ratcheting][validation-ratcheting] in [beta][feature-gates]; enabled by default
|
||||
|
||||
[crd-validation-rules-beta]: https://kubernetes.io/blog/2022/09/23/crd-validation-rules-beta/
|
||||
[cel-validation-rules]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules
|
||||
[validation-ratcheting]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4008-crd-ratcheting
|
||||
[feature-gates]: https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features
|
||||
[changes-in-122]: https://kubernetes.io/blog/2021/07/14/upcoming-changes-in-kubernetes-1-22/
|
||||
|
||||
---
|
||||
|
||||
## First slice of pizza
|
||||
|
||||
```yaml
|
||||
@@ -68,6 +42,8 @@ Things related to Custom Resource Definitions:
|
||||
|
||||
(a few optional things become mandatory, see [this guide](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#customresourcedefinition-v122) for details)
|
||||
|
||||
- `apiextensions.k8s.io/v1` is available since Kubernetes 1.16
|
||||
|
||||
---
|
||||
|
||||
## Second slice of pizza
|
||||
@@ -120,9 +96,9 @@ The YAML below defines a resource using the CRD that we just created:
|
||||
kind: Pizza
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: hawaiian
|
||||
name: napolitana
|
||||
spec:
|
||||
toppings: [ cheese, ham, pineapple ]
|
||||
toppings: [ mozzarella ]
|
||||
```
|
||||
|
||||
.lab[
|
||||
@@ -138,7 +114,11 @@ spec:
|
||||
|
||||
## Type validation
|
||||
|
||||
- Recent versions of Kubernetes will issue errors about unknown fields
|
||||
- Older versions of Kubernetes will accept our pizza definition as is
|
||||
|
||||
- Newer versions, however, will issue warnings about unknown fields
|
||||
|
||||
(and if we use `--validate=false`, these fields will simply be dropped)
|
||||
|
||||
- We need to improve our OpenAPI schema
|
||||
|
||||
@@ -146,28 +126,6 @@ spec:
|
||||
|
||||
---
|
||||
|
||||
## Creating a bland pizza
|
||||
|
||||
- Let's try to create a pizza anyway!
|
||||
|
||||
.lab[
|
||||
|
||||
- Only provide the most basic YAML manifest:
|
||||
```bash
|
||||
kubectl create -f- <<EOF
|
||||
kind: Pizza
|
||||
apiVersion: container.training/v1alpha1
|
||||
metadata:
|
||||
name: hawaiian
|
||||
EOF
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- That should work! (As long as we don't try to add pineapple😁)
|
||||
|
||||
---
|
||||
|
||||
## Third slice of pizza
|
||||
|
||||
- Let's add a full OpenAPI v3 schema to our Pizza CRD
|
||||
@@ -250,42 +208,24 @@ Note: we can update a CRD without having to re-create the corresponding resource
|
||||
|
||||
---
|
||||
|
||||
## Validation woes
|
||||
## Better data validation
|
||||
|
||||
- Let's check what happens if we try to update our pizzas
|
||||
- Let's change the data schema so that the sauce can only be `red` or `white`
|
||||
|
||||
- This will be implemented by @@LINK[k8s/pizza-5.yaml]
|
||||
|
||||
.lab[
|
||||
|
||||
- Try to add a label:
|
||||
- Update the Pizza CRD:
|
||||
```bash
|
||||
kubectl label pizza --all deliciousness=9001
|
||||
kubectl apply -f ~/container.training/k8s/pizza-5.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- It works for the pizzas that have `sauce` and `toppings`, but not the other one!
|
||||
|
||||
- The other one doesn't pass validation, and *can't be modified*
|
||||
|
||||
---
|
||||
|
||||
## First, let's fix this!
|
||||
|
||||
- Option 1: delete the pizza
|
||||
|
||||
*(deletion isn't subject to validation)*
|
||||
|
||||
- Option 2: update the pizza to add `sauce` and `toppings`
|
||||
|
||||
*(writing a pizza that passes validation is fine)*
|
||||
|
||||
- Option 3: relax the validation rules
|
||||
|
||||
---
|
||||
|
||||
## Next, explain what's happening
|
||||
## Validation *a posteriori*
|
||||
|
||||
- Some of the pizzas that we defined earlier *do not* pass validation
|
||||
|
||||
@@ -341,8 +281,6 @@ Note: we can update a CRD without having to re-create the corresponding resource
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Migrating database content
|
||||
|
||||
- We need to *serve* a version as long as we *store* objects in that version
|
||||
@@ -357,58 +295,6 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Validation ratcheting
|
||||
|
||||
- Good news: it's not always necessary to introduce new versions
|
||||
|
||||
(and to write the associated conversion webhooks)
|
||||
|
||||
- *Validation ratcheting allows updates to custom resources that fail validation to succeed if the validation errors were on unchanged keypaths*
|
||||
|
||||
- In other words: allow changes that don't introduce further validation errors
|
||||
|
||||
- This was introduced in Kubernetes 1.28 (alpha), enabled by default in 1.30 (beta)
|
||||
|
||||
- The rules are actually a bit more complex
|
||||
|
||||
- Another (maybe more accurate) explanation: allow tightening or loosening some field definitions
|
||||
|
||||
---
|
||||
|
||||
## Validation ratcheting example
|
||||
|
||||
- Let's change the data schema so that the sauce can only be `red` or `white`
|
||||
|
||||
- This will be implemented by @@LINK[k8s/pizza-5.yaml]
|
||||
|
||||
.lab[
|
||||
|
||||
- Update the Pizza CRD:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/pizza-5.yaml
|
||||
```
|
||||
|
||||
]
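
The manifest isn't shown here, but the relevant change in `pizza-5.yaml` is presumably an `enum` constraint on the `sauce` field, along these lines:

```yaml
# Excerpt of the OpenAPI v3 schema in the CRD (a sketch, not the actual file)
sauce:
  type: string
  enum: [ red, white ]
```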
|
||||
|
||||
---
|
||||
|
||||
## Testing validation ratcheting
|
||||
|
||||
- This should work with Kubernetes 1.30 and above
|
||||
|
||||
(but give an error for the `brownie` pizza with previous versions of K8S)
|
||||
|
||||
.lab[
|
||||
|
||||
- Add another label:
|
||||
```bash
|
||||
kubectl label pizzas --all food=definitely
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Even better data validation
|
||||
|
||||
- If we need more complex data validation, we can use a validating webhook
|
||||
|
||||
@@ -46,11 +46,11 @@ In the real world...
|
||||
|
||||
- In Kubernetes, a "disruption" is something that stops the execution of a Pod
|
||||
|
||||
- There are **voluntary** and **involuntary** disruptions
|
||||
|
||||
- voluntary = directly initiated by humans (including by mistake!)
|
||||
|
||||
- involuntary = everything else
|
||||
|
||||
- In this section, we're going to see what they are and how to prevent them
|
||||
|
||||
@@ -64,7 +64,7 @@ In the real world...
|
||||
|
||||
(includes kernel bugs, issues affecting underlying hypervisors or infrastructure...)
|
||||
|
||||
- **Involuntary** disruption (even if it results from human error!)
|
||||
|
||||
- Consequence: all workloads on that node become unresponsive
|
||||
|
||||
@@ -116,7 +116,7 @@ In the real world...
|
||||
|
||||
(because a pod is using too much memory and no limit was set)
|
||||
|
||||
- **Involuntary** disruption
|
||||
|
||||
- Consequence: kubelet starts to *evict* some pods
|
||||
|
||||
@@ -507,7 +507,7 @@ spec:
|
||||
|
||||
???
|
||||
|
||||
:EN:- Voluntary and involuntary disruptions
|
||||
:EN:- Pod Disruption Budgets
|
||||
:FR:- "Disruptions" volontaires et involontaires
|
||||
:FR:- Pod Disruption Budgets
|
||||
|
||||
@@ -368,30 +368,6 @@ class: extra-details
|
||||
|
||||
[ciliumwithoutkubeproxy]: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/#kubeproxy-free
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## About the API server certificate...
|
||||
|
||||
- In the previous sections, we've skipped API server certificate verification
|
||||
|
||||
- To generate a proper certificate, we need to include a `subjectAltName` extension
|
||||
|
||||
- And make sure that the CA includes the extension in the certificate
|
||||
|
||||
```bash
|
||||
openssl genrsa -out apiserver.key 4096
|
||||
|
||||
openssl req -new -key apiserver.key -subj /CN=kubernetes/ \
|
||||
-addext "subjectAltName = DNS:kubernetes.default.svc, \
|
||||
DNS:kubernetes.default, DNS:kubernetes, \
|
||||
DNS:localhost, DNS:polykube1" -out apiserver.csr
|
||||
|
||||
openssl x509 -req -in apiserver.csr -CAkey ca.key -CA ca.cert \
|
||||
-out apiserver.crt -copy_extensions copy
|
||||
```
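
To double-check that the SANs made it into the signed certificate, we can inspect it:

```bash
# Display the Subject Alternative Name extension of the new certificate
openssl x509 -in apiserver.crt -noout -text | grep -A1 "Subject Alternative Name"
```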
|
||||
|
||||
???
|
||||
|
||||
:EN:- Connecting nodes and pods
|
||||
|
||||
@@ -1,508 +0,0 @@
|
||||
# FluxCD
|
||||
|
||||
- We're going to implement a basic GitOps workflow with Flux
|
||||
|
||||
- Pushing to `main` will automatically deploy to the clusters
|
||||
|
||||
- There will be two clusters (`dev` and `prod`)
|
||||
|
||||
- The two clusters will have similar (but slightly different) workloads
|
||||
|
||||
---
|
||||
|
||||
## Repository structure
|
||||
|
||||
This is (approximately) what we're going to do:
|
||||
|
||||
```
|
||||
@@INCLUDE[slides/k8s/gitopstree.txt]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Getting ready
|
||||
|
||||
- Let's make sure we have two clusters
|
||||
|
||||
- It's OK to use local clusters (kind, minikube...)
|
||||
|
||||
- We might run into resource limits, though
|
||||
|
||||
(pay attention to `Pending` pods!)
|
||||
|
||||
- We need to install the Flux CLI ([packages], [binaries])
|
||||
|
||||
- **Highly recommended:** set up CLI completion!
|
||||
|
||||
- Of course we'll need a Git service, too
|
||||
|
||||
(we're going to use GitHub here)
|
||||
|
||||
[packages]: https://fluxcd.io/flux/get-started/
|
||||
[binaries]: https://github.com/fluxcd/flux2/releases
|
||||
|
||||
---
|
||||
|
||||
## GitHub setup
|
||||
|
||||
- Generate a GitHub token:
|
||||
|
||||
https://github.com/settings/tokens/new
|
||||
|
||||
- Give it "repo" access
|
||||
|
||||
- This token will be used by the `flux bootstrap github` command later
|
||||
|
||||
- It will create a repository and configure it (SSH key...)
|
||||
|
||||
- The token can be revoked afterwards
|
||||
|
||||
---
|
||||
|
||||
## Flux bootstrap
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's set a few variables for convenience, and create our repository:
|
||||
```bash
|
||||
export GITHUB_TOKEN=...
|
||||
export GITHUB_USER=changeme
|
||||
export GITHUB_REPO=alsochangeme
|
||||
export FLUX_CLUSTER=dev
|
||||
|
||||
flux bootstrap github \
|
||||
--owner=$GITHUB_USER \
|
||||
--repository=$GITHUB_REPO \
|
||||
--branch=main \
|
||||
--path=./clusters/$FLUX_CLUSTER \
|
||||
--personal --public
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Problems? check next slide!
|
||||
|
||||
---
|
||||
|
||||
## What could go wrong?
|
||||
|
||||
- `flux bootstrap` will create or update the repository on GitHub
|
||||
|
||||
- Then it will install Flux controllers to our cluster
|
||||
|
||||
- Then it waits for these controllers to be up and running and ready
|
||||
|
||||
- Check pod status in `flux-system`
|
||||
|
||||
- If pods are `Pending`, check that you have enough resources on your cluster
|
||||
|
||||
- For testing purposes, it should be fine to lower or remove Flux `requests`!
|
||||
|
||||
(but don't do that in production!)
|
||||
|
||||
- If anything goes wrong, don't worry, we can just re-run the bootstrap
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Idempotence
|
||||
|
||||
- It's OK to run that same `flux bootstrap` command multiple times!
|
||||
|
||||
- If the repository already exists, it will re-use it
|
||||
|
||||
(it won't destroy or empty it)
|
||||
|
||||
- If the path `./clusters/$FLUX_CLUSTER` already exists, it will update it
|
||||
|
||||
- It's totally fine to re-run `flux bootstrap` if something fails
|
||||
|
||||
- It's totally fine to run it multiple times on different clusters
|
||||
|
||||
- Or even to run it multiple times for the *same* cluster
|
||||
|
||||
(to reinstall Flux on that cluster after a cluster wipe / reinstall)
|
||||
|
||||
---
|
||||
|
||||
## What do we get?
|
||||
|
||||
- Let's look at what `flux bootstrap` installed on the cluster
|
||||
|
||||
.lab[
|
||||
|
||||
- Look inside the `flux-system` namespace:
|
||||
```bash
|
||||
kubectl get all --namespace flux-system
|
||||
```
|
||||
|
||||
- Look at `kustomizations` custom resources:
|
||||
```bash
|
||||
kubectl get kustomizations --all-namespaces
|
||||
```
|
||||
|
||||
- See what the `flux` CLI tells us:
|
||||
```bash
|
||||
flux get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying with GitOps
|
||||
|
||||
- We'll need to add/edit files on the repository
|
||||
|
||||
- We can do it by using `git clone`, local edits, `git commit`, `git push`
|
||||
|
||||
- Or by editing online on the GitHub website
|
||||
|
||||
.lab[
|
||||
|
||||
- Create a manifest; for instance `clusters/dev/flux-system/blue.yaml`
|
||||
|
||||
- Add that manifest to `clusters/dev/kustomization.yaml`
|
||||
|
||||
- Commit and push both changes to the repository
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Waiting for reconciliation
|
||||
|
||||
- Compare the git hash that we pushed and the one shown with `kubectl get `
|
||||
|
||||
- Option 1: wait for Flux to pick up the changes in the repository
|
||||
|
||||
(the default interval for git repositories is 1 minute, so that's fast)
|
||||
|
||||
- Option 2: use `flux reconcile source git flux-system`
|
||||
|
||||
(this puts an annotation on the appropriate resource, triggering an immediate check)
|
||||
|
||||
- Option 3: set up receiver webhooks
|
||||
|
||||
(so that git updates trigger immediate reconciliation)
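
With option 2, the typical sequence is to refresh the source first, then the kustomization that consumes it (names match the bootstrap defaults used earlier):

```bash
# Fetch the latest commit of the git repository right away
flux reconcile source git flux-system

# Then re-apply the kustomization that references it
flux reconcile kustomization flux-system
```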
|
||||
|
||||
---
|
||||
|
||||
## Checking progress
|
||||
|
||||
- `flux logs`
|
||||
|
||||
- `kubectl get gitrepositories --all-namespaces`
|
||||
|
||||
- `kubectl get kustomizations --all-namespaces`
|
||||
|
||||
---
|
||||
|
||||
## Did it work?
|
||||
|
||||
--
|
||||
|
||||
- No!
|
||||
|
||||
--
|
||||
|
||||
- Why?
|
||||
|
||||
--
|
||||
|
||||
- We need to indicate the namespace where the app should be deployed
|
||||
|
||||
- Either in the YAML manifests
|
||||
|
||||
- Or in the `kustomization` custom resource
|
||||
|
||||
(using field `spec.targetNamespace`)
|
||||
|
||||
- Add the namespace to the manifest and try again!
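
For the second approach, the Kustomization resource would gain a `targetNamespace` field, roughly like this sketch (the API version and interval may differ depending on the Flux release):

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 10m
  path: ./clusters/dev
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  targetNamespace: blue       # namespace where the manifests get applied
```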
|
||||
|
||||
---
|
||||
|
||||
## Adding an app in a reusable way
|
||||
|
||||
- Let's see a technique to add a whole app
|
||||
|
||||
(with multiple resource manifests)
|
||||
|
||||
- We want to minimize code repetition
|
||||
|
||||
(i.e. easy to add on multiple clusters with minimal changes)
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- Add the app manifests in a directory
|
||||
|
||||
(e.g.: `apps/myappname/manifests`)
|
||||
|
||||
- Create a kustomization manifest for the app and its namespace
|
||||
|
||||
(e.g.: `apps/myappname/flux.yaml`)
|
||||
|
||||
- The kustomization manifest will refer to the app manifest
|
||||
|
||||
- Add the kustomization manifest to the top-level `flux-system` kustomization
|
||||
|
||||
---
|
||||
|
||||
## Creating the manifests
|
||||
|
||||
- All commands below should be executed at the root of the repository
|
||||
|
||||
.lab[
|
||||
|
||||
- Put application manifests in their directory:
|
||||
```bash
|
||||
mkdir -p apps/dockercoins/manifests
cp ~/container.training/k8s/dockercoins.yaml apps/dockercoins/manifests/
|
||||
```
|
||||
|
||||
- Create kustomization manifest:
|
||||
```bash
|
||||
flux create kustomization dockercoins \
|
||||
--source=GitRepository/flux-system \
|
||||
--path=./apps/dockercoins/manifests/ \
|
||||
--target-namespace=dockercoins \
|
||||
--prune=true --export > apps/dockercoins/flux.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating the target namespace
|
||||
|
||||
- When deploying *helm releases*, it is possible to automatically create the namespace
|
||||
|
||||
- When deploying *kustomizations*, we need to create it explicitly
|
||||
|
||||
- Let's put the namespace with the kustomization manifest
|
||||
|
||||
(so that the whole app can be mediated through a single manifest)
|
||||
|
||||
.lab[
|
||||
|
||||
- Add the target namespace to the kustomization manifest:
|
||||
```bash
|
||||
echo "---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: dockercoins" >> apps/dockercoins/flux.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Linking the kustomization manifest
|
||||
|
||||
- Edit `clusters/dev/flux-system/kustomization.yaml`
|
||||
|
||||
- Add a line to reference the kustomization manifest that we created:
|
||||
```yaml
|
||||
- ../../../apps/dockercoins/flux.yaml
|
||||
```
|
||||
|
||||
- `git add` our manifests, `git commit`, `git push`
|
||||
|
||||
(check with `git status` that we haven't forgotten anything!)
|
||||
|
||||
- `flux reconcile` or wait for the changes to be picked up
|
||||
|
||||
---
|
||||
|
||||
## Installing with Helm
|
||||
|
||||
- We're going to see two different workflows:
|
||||
|
||||
- installing a third-party chart
|
||||
<br/>
|
||||
(e.g. something we found on the Artifact Hub)
|
||||
|
||||
- installing one of our own charts
|
||||
<br/>
|
||||
(e.g. a chart we authored ourselves)
|
||||
|
||||
- The procedures are very similar
|
||||
|
||||
---
|
||||
|
||||
## Installing from a public Helm repository
|
||||
|
||||
- Let's install [kube-prometheus-stack][kps]
|
||||
|
||||
.lab[
|
||||
|
||||
- Create the Flux manifests:
|
||||
```bash
|
||||
mkdir -p apps/kube-prometheus-stack
|
||||
flux create source helm kube-prometheus-stack \
|
||||
--url=https://prometheus-community.github.io/helm-charts \
|
||||
--export >> apps/kube-prometheus-stack/flux.yaml
|
||||
flux create helmrelease kube-prometheus-stack \
|
||||
--source=HelmRepository/kube-prometheus-stack \
|
||||
--chart=kube-prometheus-stack --release-name=kube-prometheus-stack \
|
||||
--target-namespace=kube-prometheus-stack --create-target-namespace \
|
||||
--export >> apps/kube-prometheus-stack/flux.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
[kps]: https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack
|
||||
|
||||
---
|
||||
|
||||
## Enable the app
|
||||
|
||||
- Just like before, link the manifest from the top-level kustomization
|
||||
|
||||
(`flux-system` in namespace `flux-system`)
|
||||
|
||||
- `git add` / `git commit` / `git push`
|
||||
|
||||
- We should now have a Prometheus+Grafana observability stack!
|
||||
|
||||
---
|
||||
|
||||
## Installing from a Helm chart in a git repo
|
||||
|
||||
- In this example, the chart will be in the same repo
|
||||
|
||||
- In the real world, it will typically be in a different repo!
|
||||
|
||||
.lab[
|
||||
|
||||
- Generate a basic Helm chart:
|
||||
```bash
|
||||
mkdir -p charts
|
||||
helm create charts/myapp
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(This generates a chart which installs NGINX. A lot of things can be customized, though.)
|
||||
|
||||
---
|
||||
|
||||
## Creating the Flux manifests
|
||||
|
||||
- The invocation is very similar to our first example
|
||||
|
||||
.lab[
|
||||
|
||||
- Generate the Flux manifest for the Helm release:
|
||||
```bash
|
||||
mkdir apps/myapp
|
||||
flux create helmrelease myapp \
|
||||
--source=GitRepository/flux-system \
|
||||
--chart=charts/myapp \
|
||||
--target-namespace=myapp --create-target-namespace \
|
||||
--export > apps/myapp/flux.yaml
|
||||
```
|
||||
|
||||
- Add a reference to that manifest to the top-level kustomization
|
||||
|
||||
- `git add` / `git commit` / `git push` the chart, manifest, and kustomization
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Passing values
|
||||
|
||||
- We can also configure our Helm releases with values
|
||||
|
||||
- Using an existing `myvalues.yaml` file:
|
||||
|
||||
`flux create helmrelease ... --values=myvalues.yaml`
|
||||
|
||||
- Referencing an existing ConfigMap or Secret with a `values.yaml` key:
|
||||
|
||||
`flux create helmrelease ... --values-from=ConfigMap/myapp`
|
||||
|
||||
---
|
||||
|
||||
## Gotchas
|
||||
|
||||
- When creating a HelmRelease using a chart stored in a git repository, you must:
|
||||
|
||||
- either bump the chart version (in `Chart.yaml`) after each change,
|
||||
|
||||
- or set `spec.chart.spec.reconcileStrategy` to `Revision`
|
||||
|
||||
- Why?
|
||||
|
||||
- Flux installs helm releases using packaged artifacts
|
||||
|
||||
- Artifacts are updated only when the Helm chart version changes
|
||||
|
||||
- Unless `reconcileStrategy` is set to `Revision` (instead of the default `ChartVersion`)
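
In the HelmRelease generated by `flux create helmrelease --export`, that setting would go here (abbreviated sketch; the exact API version depends on your Flux release):

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: myapp
spec:
  interval: 5m
  chart:
    spec:
      chart: charts/myapp
      reconcileStrategy: Revision   # rebuild the chart artifact on every git revision
      sourceRef:
        kind: GitRepository
        name: flux-system
  targetNamespace: myapp
```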
|
||||
|
||||
---
|
||||
|
||||
## More gotchas
|
||||
|
||||
- There is a bug in Flux that prevents using identical subcharts with aliases
|
||||
|
||||
- See [fluxcd/flux2#2505][flux2505] for details
|
||||
|
||||
[flux2505]: https://github.com/fluxcd/flux2/discussions/2505
|
||||
|
||||
---
|
||||
|
||||
## Things that we didn't talk about...
|
||||
|
||||
- Bucket sources
|
||||
|
||||
- Image automation controller
|
||||
|
||||
- Image reflector controller
|
||||
|
||||
- And more!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Implementing gitops with Flux
|
||||
:FR:- Workflow gitops avec Flux
|
||||
|
||||
<!--
|
||||
|
||||
helm upgrade --install --repo https://dl.gitea.io/charts --namespace gitea --create-namespace gitea gitea \
|
||||
--set persistence.enabled=false \
|
||||
--set redis-cluster.enabled=false \
|
||||
--set postgresql-ha.enabled=false \
|
||||
--set postgresql.enabled=true \
|
||||
--set gitea.config.session.PROVIDER=db \
|
||||
--set gitea.config.cache.ADAPTER=memory \
|
||||
#
|
||||
|
||||
### Bootstrap Flux controllers
|
||||
|
||||
```bash
|
||||
mkdir -p flux/flux-system
|
||||
flux install --export > flux/flux-system/gotk-components.yaml
|
||||
kubectl apply -f flux/flux-system/gotk-components.yaml
|
||||
```
|
||||
|
||||
### Bootstrap GitRepository/Kustomization
|
||||
|
||||
```bash
|
||||
export REPO_URL="<gitlab_url>" DEPLOY_USERNAME="<username>"
|
||||
read -s DEPLOY_TOKEN
|
||||
flux create secret git flux-system --url="${REPO_URL}" --username="${DEPLOY_USERNAME}" --password="${DEPLOY_TOKEN}"
|
||||
flux create source git flux-system --url=$REPO_URL --branch=main --secret-ref flux-system --ignore-paths='/*,!/flux' --export > flux/flux-system/gotk-sync.yaml
|
||||
flux create kustomization flux-system --source=GitRepository/flux-system --path="./flux" --prune=true --export >> flux/flux-system/gotk-sync.yaml
|
||||
|
||||
git add flux/ && git commit -m 'feat: Setup Flux' flux/ && git push
|
||||
kubectl apply -f flux/flux-system/gotk-sync.yaml
|
||||
```
|
||||
|
||||
-->
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
├── charts/ <--- could also be in separate app repos
|
||||
│ ├── dockercoins/
|
||||
│ └── color/
|
||||
├── apps/ <--- YAML manifests for GitOps resources
|
||||
│ ├── dockercoins/ (might reference the "charts" above,
|
||||
│ ├── blue/ and/or include environment-specific
|
||||
│ ├── green/ manifests to create e.g. namespaces,
|
||||
│ ├── kube-prometheus-stack/ configmaps, secrets...)
|
||||
│ ├── cert-manager/
|
||||
│ └── traefik/
|
||||
└── clusters/ <--- per-cluster; will typically reference
|
||||
├── prod/ the "apps" above, possibly extending
|
||||
└── dev/ or adding configuration resources too
|
||||
@@ -1,4 +1,4 @@
|
||||
# Git-based workflows (GitOps)
|
||||
# Git-based workflows
|
||||
|
||||
- Deploying with `kubectl` has downsides:
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
- These resources have a perfect YAML representation
|
||||
|
||||
- All we do is manipulate these YAML representations
|
||||
|
||||
(`kubectl run` generates a YAML file that gets applied)
|
||||
|
||||
@@ -34,232 +34,229 @@
|
||||
|
||||
- control who can push to which branches
|
||||
|
||||
- have formal review processes, pull requests, test gates...
|
||||
- have formal review processes, pull requests ...
|
||||
|
||||
---
|
||||
|
||||
## Enabling git-based workflows
|
||||
|
||||
- There are many tools out there to help us do that, with different approaches
|
||||
- There are a few tools out there to help us do that
|
||||
|
||||
- "Git host centric" approach: GitHub Actions, GitLab...
|
||||
- We'll see demos of two of them: [Flux] and [Gitkube]
|
||||
|
||||
*the workflows/action are directly initiated by the git platform*
|
||||
- There are *many* other tools, some of them with even more features
|
||||
|
||||
- "Kubernetes cluster centric" approach: [ArgoCD], [FluxCD]..
|
||||
- There are also *many* integrations with popular CI/CD systems
|
||||
|
||||
*controllers run on our clusters and trigger on repo updates*
|
||||
(e.g.: GitLab, Jenkins, ...)
|
||||
|
||||
- This is not an exhaustive list (see also: Jenkins)
|
||||
|
||||
- We're going to talk mostly about "Kubernetes cluster centric" approaches here
|
||||
|
||||
[ArgoCD]: https://argoproj.github.io/cd/
|
||||
[Flux]: https://fluxcd.io/
|
||||
[Flux]: https://www.weave.works/oss/flux/
|
||||
[Gitkube]: https://gitkube.sh/
|
||||
|
||||
---
|
||||
|
||||
## The road to production
|
||||
## Flux overview
|
||||
|
||||
In no specific order, we need to at least:
|
||||
- We put our Kubernetes resources as YAML files in a git repository
|
||||
|
||||
- Choose a tool
|
||||
- Flux polls that repository regularly (every 5 minutes by default)
|
||||
|
||||
- Choose a cluster / app / namespace layout
|
||||
<br/>
|
||||
(one cluster per app, different clusters for prod/staging...)
|
||||
- The resources described by the YAML files are created/updated automatically
|
||||
|
||||
- Choose a repository layout
|
||||
<br/>
|
||||
(different repositories, directories, branches per app, env, cluster...)
|
||||
|
||||
- Choose an installation / bootstrap method
|
||||
|
||||
- Choose how new apps / environments / versions will be deployed
|
||||
|
||||
- Choose how new images will be built
|
||||
- Changes are made by updating the code in the repository
|
||||
|
||||
---
|
||||
|
||||
## Flux vs ArgoCD (1/2)
|
||||
## Preparing a repository for Flux
|
||||
|
||||
- Flux:
|
||||
- We need a repository with Kubernetes YAML files
|
||||
|
||||
- fancy setup with an (optional) dedicated `flux bootstrap` command
|
||||
<br/>
|
||||
(with support for specific git providers, repo creation...)
|
||||
- I have one: https://github.com/jpetazzo/kubercoins
|
||||
|
||||
- deploying an app requires multiple CRDs
|
||||
<br/>
|
||||
(Kustomization, HelmRelease, GitRepository...)
|
||||
- Fork it to your GitHub account
|
||||
|
||||
- supports Helm charts, Kustomize, raw YAML
|
||||
- Create a new branch in your fork; e.g. `prod`
|
||||
|
||||
- ArgoCD:
|
||||
(e.g. with "branch" dropdown through the GitHub web UI)
|
||||
|
||||
- simple setup (just apply YAMLs / install Helm chart)
|
||||
|
||||
- fewer CRDs (basic workflow can be implemented with a single "Application" resource)
|
||||
|
||||
- supports Helm charts, Jsonnet, Kustomize, raw YAML, and arbitrary plugins
|
||||
- This is the branch that we are going to use for deployment
|
||||
|
||||
---
|
||||
|
||||
## Flux vs ArgoCD (2/2)
|
||||
## Setting up Flux with kustomize
|
||||
|
||||
- Flux:
|
||||
- Clone the Flux repository:
|
||||
```bash
|
||||
git clone https://github.com/fluxcd/flux
|
||||
cd flux
|
||||
```
|
||||
|
||||
- sync interval is configurable per app
|
||||
- no web UI out of the box
|
||||
- CLI relies on Kubernetes API access
|
||||
- CLI can easily generate custom resource manifests (with `--export`)
|
||||
- self-hosted (flux controllers are managed by flux itself by default)
|
||||
- one flux instance manages a single cluster
|
||||
- Edit `deploy/flux-deployment.yaml`
|
||||
|
||||
- ArgoCD:
|
||||
- Change the `--git-url` and `--git-branch` parameters:
|
||||
```yaml
|
||||
- --git-url=git@github.com:your-git-username/kubercoins
|
||||
- --git-branch=prod
|
||||
```
|
||||
|
||||
- sync interval is configured globally
|
||||
- comes with a web UI
|
||||
- CLI can use Kubernetes API or separate API and authentication system
|
||||
- one ArgoCD instance can manage multiple clusters
|
||||
- Apply all the YAML:
|
||||
```bash
|
||||
kubectl apply -k deploy/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Cluster, app, namespace layout
|
||||
## Setting up Flux with Helm
|
||||
|
||||
- One cluster per app, different namespaces for environments?
|
||||
- Add Flux helm repo:
|
||||
```bash
|
||||
helm repo add fluxcd https://charts.fluxcd.io
|
||||
```
|
||||
|
||||
- One cluster per environment, different namespaces for apps?
|
||||
|
||||
- Everything on a single cluster? One cluster per combination?
|
||||
|
||||
- Something in between:
|
||||
|
||||
- prod cluster, database cluster, dev/staging/etc cluster
|
||||
|
||||
- prod+db cluster per app, shared dev/staging/etc cluster
|
||||
|
||||
- And more!
|
||||
|
||||
Note: this decision isn't really tied to GitOps!
|
||||
- Install Flux:
|
||||
```bash
|
||||
kubectl create namespace flux
|
||||
helm upgrade --install flux \
|
||||
--set git.url=git@github.com:your-git-username/kubercoins \
|
||||
--set git.branch=prod \
|
||||
--namespace flux \
|
||||
fluxcd/flux
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Repository layout
|
||||
## Allowing Flux to access the repository
|
||||
|
||||
So many different possibilities!
|
||||
- When it starts, Flux generates an SSH key
|
||||
|
||||
- Source repos
|
||||
- Display that key:
|
||||
```bash
|
||||
kubectl -n flux logs deployment/flux | grep identity.pub | cut -d '"' -f2
|
||||
```
|
||||
|
||||
- Cluster/infra repos/branches/directories
|
||||
- Then add that key to the repository, giving it **write** access
|
||||
|
||||
- "Deployment" repos (with manifests, charts)
|
||||
(some Flux features require write access)
|
||||
|
||||
- Different repos/branches/directories for environments
|
||||
|
||||
🤔 How to decide?
|
||||
- After a minute or so, DockerCoins will be deployed to the current namespace
|
||||
|
||||
---
|
||||
|
||||
## Permissions
|
||||
## Making changes
|
||||
|
||||
- Different teams/companies = different repos
|
||||
- Make changes (on the `prod` branch), e.g. change `replicas` in `worker`
|
||||
|
||||
- separate platform team → separate "infra" vs "apps" repos
|
||||
|
||||
- teams working on different apps → different repos per app
|
||||
|
||||
- Branches can be "protected" (`production`, `main`...)
|
||||
|
||||
(don't need separate repos for separate environments)
|
||||
|
||||
- Directories will typically have the same permissions
|
||||
|
||||
- Managing directories is easier than branches
|
||||
|
||||
- But branches are more "powerful" (cherrypicking, rebasing...)
|
||||
- After a few minutes, the changes will be picked up by Flux and applied
|
||||
|
||||
---
|
||||
|
||||
## Resource hierarchy
|
||||
## Other features
|
||||
|
||||
- Git-based deployments are managed by Kubernetes resources
|
||||
- Flux can keep a list of all the tags of all the images we're running
|
||||
|
||||
(e.g. Kustomization, HelmRelease with Flux; Application with ArgoCD)
|
||||
- The `fluxctl` tool can show us if we're running the latest images
|
||||
|
||||
- We will call these resources "GitOps resources"
|
||||
- We can also "automate" a resource (i.e. automatically deploy new images)
|
||||
|
||||
- These resources need to be managed like any other Kubernetes resource
|
||||
|
||||
(YAML manifests, Kustomizations, Helm charts)
|
||||
|
||||
- They can be managed with Git workflows too!
|
||||
- And much more!
|
||||
|
||||
---
|
||||
|
||||
## Cluster / infra management
|
||||
## Gitkube overview
|
||||
|
||||
- How do we provision clusters?
|
||||
- We put our Kubernetes resources as YAML files in a git repository
|
||||
|
||||
- Manual "one-shot" provisioning (CLI, web UI...)
|
||||
- Gitkube is a git server (or "git remote")
|
||||
|
||||
- Automation with Terraform, Ansible...
|
||||
- After making changes to the repository, we push to Gitkube
|
||||
|
||||
- Kubernetes-driven systems (Crossplane, CAPI)
|
||||
|
||||
- Infrastructure can also be managed with GitOps
|
||||
- Gitkube applies the resources to the cluster
|
||||
|
||||
---
|
||||
|
||||
## Example 1
|
||||
## Setting up Gitkube
|
||||
|
||||
- Managed with YAML/Charts:
|
||||
- Install the CLI:
|
||||
```bash
|
||||
sudo curl -L -o /usr/local/bin/gitkube \
|
||||
https://github.com/hasura/gitkube/releases/download/v0.2.1/gitkube_linux_amd64
|
||||
sudo chmod +x /usr/local/bin/gitkube
|
||||
```
|
||||
|
||||
- core components (CNI, CSI, Ingress, logging, monitoring...)
|
||||
|
||||
- GitOps controllers
|
||||
|
||||
- critical application foundations (database operator, databases)
|
||||
|
||||
- GitOps manifests
|
||||
|
||||
- Managed with GitOps:
|
||||
|
||||
- applications
|
||||
|
||||
- staging databases
|
||||
- Install Gitkube on the cluster:
|
||||
```bash
|
||||
gitkube install --expose ClusterIP
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Example 2
|
||||
## Creating a Remote
|
||||
|
||||
- Managed with YAML/Charts:
|
||||
- Gitkube provides a new type of API resource: *Remote*
|
||||
|
||||
- essential components (CNI, CoreDNS)
|
||||
(this is using a mechanism called Custom Resource Definitions or CRD)
|
||||
|
||||
- initial installation of GitOps controllers
|
||||
- Create and apply a YAML file containing the following manifest:
|
||||
```yaml
|
||||
apiVersion: gitkube.sh/v1alpha1
|
||||
kind: Remote
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
authorizedKeys:
|
||||
- `ssh-rsa AAA...`
|
||||
manifests:
|
||||
path: "."
|
||||
```
|
||||
|
||||
- Managed with GitOps:
|
||||
|
||||
- upgrades of GitOps controllers
|
||||
|
||||
- core components (CSI, Ingress, logging, monitoring...)
|
||||
|
||||
- operators, databases
|
||||
|
||||
- more GitOps manifests for applications!
|
||||
(replace the `ssh-rsa AAA...` section with the content of `~/.ssh/id_rsa.pub`)
|
||||
|
||||
---
|
||||
|
||||
## Concrete example
|
||||
## Pushing to our remote
|
||||
|
||||
- Source code repository (not shown here)
|
||||
- Get the `gitkubed` IP address:
|
||||
```bash
|
||||
kubectl -n kube-system get svc gitkubed
|
||||
IP=$(kubectl -n kube-system get svc gitkubed -o json |
|
||||
jq -r .spec.clusterIP)
|
||||
```
|
||||
|
||||
- Infrastructure repository (shown below), single branch
|
||||
- Get ourselves a sample repository with resource YAML files:
|
||||
```bash
|
||||
git clone git://github.com/jpetazzo/kubercoins
|
||||
cd kubercoins
|
||||
```
|
||||
|
||||
```
|
||||
@@INCLUDE[slides/k8s/gitopstree.txt]
|
||||
```
|
||||
- Add the remote and push to it:
|
||||
```bash
|
||||
git remote add k8s ssh://default-example@$IP/~/git/default-example
|
||||
git push k8s master
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Making changes
|
||||
|
||||
- Edit a local file
|
||||
|
||||
- Commit
|
||||
|
||||
- Push!
|
||||
|
||||
- Make sure that you push to the `k8s` remote
|
||||
|
||||
---
|
||||
|
||||
## Other features
|
||||
|
||||
- Gitkube can also build container images for us
|
||||
|
||||
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
|
||||
|
||||
- Gitkube can also deploy Helm charts
|
||||
|
||||
(instead of raw YAML files)
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@
|
||||
- instructions indicating to users "please tweak this and that in the YAML"
|
||||
|
||||
- That's where using something like
|
||||
[CUE](https://github.com/cue-labs/cue-by-example/tree/main/003_kubernetes_tutorial),
|
||||
[CUE](https://github.com/cuelang/cue/blob/v0.3.2/doc/tutorial/kubernetes/README.md),
|
||||
[Kustomize](https://kustomize.io/),
|
||||
or [Helm](https://helm.sh/) can help!
|
||||
|
||||
@@ -86,6 +86,8 @@
|
||||
|
||||
- On April 30th 2020, Helm was the 10th project to *graduate* within the CNCF
|
||||
|
||||
🎉
|
||||
|
||||
(alongside Containerd, Prometheus, and Kubernetes itself)
|
||||
|
||||
- This is an acknowledgement by the CNCF for projects that
|
||||
@@ -97,8 +99,6 @@
|
||||
- See [CNCF announcement](https://www.cncf.io/announcement/2020/04/30/cloud-native-computing-foundation-announces-helm-graduation/)
|
||||
and [Helm announcement](https://helm.sh/blog/celebrating-helms-cncf-graduation/)
|
||||
|
||||
- In other words: Helm is here to stay
|
||||
|
||||
---
|
||||
|
||||
## Helm concepts
|
||||
@@ -173,13 +173,11 @@ or `apt` tools).
|
||||
|
||||
- Helm 3 doesn't use `tiller` at all, making it simpler (yay!)
|
||||
|
||||
- If you see references to `tiller` in a tutorial, documentation... that doc is obsolete!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What was the problem with `tiller`?
|
||||
## With or without `tiller`
|
||||
|
||||
- With Helm 3:
|
||||
|
||||
@@ -195,7 +193,9 @@ class: extra-details
|
||||
|
||||
- This indirect model caused significant permissions headaches
|
||||
|
||||
- It also made it more complicated to embed Helm in other tools
|
||||
(`tiller` required very broad permissions to function)
|
||||
|
||||
- `tiller` was removed in Helm 3 to simplify the security aspects
|
||||
|
||||
---
|
||||
|
||||
@@ -222,6 +222,59 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Only if using Helm 2 ...
|
||||
|
||||
- We need to install Tiller and give it some permissions
|
||||
|
||||
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
|
||||
|
||||
- They can be managed (installed, upgraded...) with the `helm` CLI
|
||||
|
||||
.lab[
|
||||
|
||||
- Deploy Tiller:
|
||||
```bash
|
||||
helm init
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
At the end of the install process, you will see:
|
||||
|
||||
```
|
||||
Happy Helming!
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Only if using Helm 2 ...
|
||||
|
||||
- Tiller needs permissions to create Kubernetes resources
|
||||
|
||||
- In a more realistic deployment, you might create per-user or per-team
|
||||
service accounts, roles, and role bindings
|
||||
|
||||
.lab[
|
||||
|
||||
- Grant `cluster-admin` role to `kube-system:default` service account:
|
||||
```bash
|
||||
kubectl create clusterrolebinding add-on-cluster-admin \
|
||||
--clusterrole=cluster-admin --serviceaccount=kube-system:default
|
||||
```
|
||||
|
||||
|
||||
]
|
||||
|
||||
(Defining the exact roles and permissions on your cluster requires
|
||||
a deeper knowledge of Kubernetes' RBAC model. The command above is
|
||||
fine for personal and development clusters.)
|
||||
|
||||
---
|
||||
|
||||
## Charts and repositories
|
||||
|
||||
- A *repository* (or repo in short) is a collection of charts
|
||||
@@ -240,7 +293,27 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## How to find charts
|
||||
class: extra-details
|
||||
|
||||
## How to find charts, the old way
|
||||
|
||||
- Helm 2 came with one pre-configured repo, the "stable" repo
|
||||
|
||||
(located at https://charts.helm.sh/stable)
|
||||
|
||||
- Helm 3 doesn't have any pre-configured repo
|
||||
|
||||
- The "stable" repo mentioned above is now being deprecated
|
||||
|
||||
- The new approach is to have fully decentralized repos
|
||||
|
||||
- Repos can be indexed in the Artifact Hub
|
||||
|
||||
(which supersedes the Helm Hub)
|
||||
|
||||
---
|
||||
|
||||
## How to find charts, the new way
|
||||
|
||||
- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io)
|
||||
|
||||
@@ -336,6 +409,24 @@ Note: it is also possible to install directly a chart, with `--repo https://...`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Searching and installing with Helm 2
|
||||
|
||||
- Helm 2 doesn't have support for the Helm Hub
|
||||
|
||||
- The `helm search` command only takes a search string argument
|
||||
|
||||
(e.g. `helm search juice-shop`)
|
||||
|
||||
- With Helm 2, the name is optional:
|
||||
|
||||
`helm install juice/juice-shop` will automatically generate a name
|
||||
|
||||
`helm install --name my-juice-shop juice/juice-shop` will specify a name
|
||||
|
||||
---
|
||||
|
||||
## Viewing resources of a release
|
||||
|
||||
- This specific chart labels all its resources with a `release` label
|
||||
@@ -451,11 +542,11 @@ All unspecified values will take the default values defined in the chart.
|
||||
|
||||
:EN:- Helm concepts
|
||||
:EN:- Installing software with Helm
|
||||
:EN:- Finding charts on the Artifact Hub
|
||||
:EN:- Helm 2, Helm 3, and the Helm Hub
|
||||
|
||||
:FR:- Fonctionnement général de Helm
|
||||
:FR:- Installer des composants via Helm
|
||||
:FR:- Trouver des *charts* sur *Artifact Hub*
|
||||
:FR:- Helm 2, Helm 3, et le *Helm Hub*
|
||||
|
||||
:T: Getting started with Helm and its concepts
|
||||
|
||||
|
||||
slides/k8s/hpa-v2-keda.md (new file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
# Scaling with custom metrics
|
||||
|
||||
- The HorizontalPodAutoscaler v1 can only scale on Pod CPU usage
|
||||
|
||||
- Sometimes, we need to scale using other metrics:
|
||||
|
||||
- memory
|
||||
|
||||
- requests per second
|
||||
|
||||
- latency
|
||||
|
||||
- active sessions
|
||||
|
||||
- items in a work queue
|
||||
|
||||
- ...
|
||||
|
||||
- The HorizontalPodAutoscaler v2 can do it!
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
⚠️ Autoscaling on custom metrics is fairly complex!
|
||||
|
||||
- We need some metrics system
|
||||
|
||||
(Prometheus is a popular option, but others are possible too)
|
||||
|
||||
- We need our metrics (latency, traffic...) to be fed in the system
|
||||
|
||||
(with Prometheus, this might require a custom exporter)
|
||||
|
||||
- We need to expose these metrics to Kubernetes
|
||||
|
||||
(Kubernetes doesn't "speak" the Prometheus API)
|
||||
|
||||
- Then we can set up autoscaling!
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- We will deploy the DockerCoins demo app
|
||||
|
||||
(one of its components has a bottleneck; its latency will increase under load)
|
||||
|
||||
- We will use Prometheus to collect and store metrics
|
||||
|
||||
- We will deploy a tiny HTTP latency monitor (a Prometheus *exporter*)
|
||||
|
||||
- We will then use KEDA with a "Prometheus Scaler"
|
||||
|
||||
---
|
||||
|
||||
## Deploying DockerCoins
|
||||
|
||||
- That's the easy part!
|
||||
|
||||
.lab[
|
||||
|
||||
- Create a new namespace and switch to it:
|
||||
```bash
|
||||
kubectl create namespace customscaling
|
||||
kns customscaling
|
||||
```
|
||||
|
||||
- Deploy DockerCoins, and scale up the `worker` Deployment:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/dockercoins.yaml
|
||||
kubectl scale deployment worker --replicas=10
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Current state of affairs
|
||||
|
||||
- The `rng` service is a bottleneck
|
||||
|
||||
(it cannot handle more than 10 requests/second)
|
||||
|
||||
- With enough traffic, its latency increases
|
||||
|
||||
(by about 100ms per `worker` Pod after the 3rd worker)
|
||||
|
||||
.lab[
|
||||
|
||||
- Check the `webui` port and open it in your browser:
|
||||
```bash
|
||||
kubectl get service webui
|
||||
```
|
||||
|
||||
- Check the `rng` ClusterIP and test it with e.g. `httping`:
|
||||
```bash
|
||||
kubectl get service rng
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Measuring latency
|
||||
|
||||
- We will use a tiny custom Prometheus exporter, [httplat](https://github.com/jpetazzo/httplat)
|
||||
|
||||
- `httplat` exposes Prometheus metrics on port 9080 (by default)
|
||||
|
||||
- It monitors exactly one URL, that must be passed as a command-line argument
|
||||
|
||||
.lab[
|
||||
|
||||
- Deploy `httplat`:
|
||||
```bash
|
||||
kubectl create deployment httplat --image=jpetazzo/httplat -- httplat http://rng/
|
||||
```
|
||||
|
||||
- Expose it:
|
||||
```bash
|
||||
kubectl expose deployment httplat --port=9080
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Measuring latency in the real world
|
||||
|
||||
- We are using this tiny custom exporter for simplicity
|
||||
|
||||
- A more common method to collect latency is to use a service mesh
|
||||
|
||||
- A service mesh can usually collect latency for *all* services automatically
|
||||
|
||||
---
|
||||
|
||||
## Install Prometheus
|
||||
|
||||
- We will use the Prometheus community Helm chart
|
||||
|
||||
(because we can configure it dynamically with annotations)
|
||||
|
||||
.lab[
|
||||
|
||||
- If it's not installed yet on the cluster, install Prometheus:
|
||||
```bash
|
||||
helm upgrade --install prometheus prometheus \
|
||||
--repo https://prometheus-community.github.io/helm-charts \
|
||||
--namespace prometheus --create-namespace \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
--set server.persistentVolume.enabled=false \
|
||||
--set alertmanager.enabled=false
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Configure Prometheus
|
||||
|
||||
- We can use annotations to tell Prometheus to collect the metrics
|
||||
|
||||
.lab[
|
||||
|
||||
- Tell Prometheus to "scrape" our latency exporter:
|
||||
```bash
|
||||
kubectl annotate service httplat \
|
||||
prometheus.io/scrape=true \
|
||||
prometheus.io/port=9080 \
|
||||
prometheus.io/path=/metrics
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If you deployed Prometheus differently, you might have to configure it manually.
|
||||
|
||||
You'll need to instruct it to scrape http://httplat.customscaling.svc:9080/metrics.
|
||||
|
||||
---
|
||||
|
||||
## Make sure that metrics get collected
|
||||
|
||||
- Before moving on, confirm that Prometheus has our metrics
|
||||
|
||||
.lab[
|
||||
|
||||
- Connect to Prometheus
|
||||
|
||||
(if you installed it like instructed above, it is exposed as a NodePort on port 30090)
|
||||
|
||||
- Check that `httplat` metrics are available
|
||||
|
||||
- You can try to graph the following PromQL expression:
|
||||
  ```
  rate(httplat_latency_seconds_sum[2m])/rate(httplat_latency_seconds_count[2m])
  ```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- Make sure that the exporter works:
|
||||
|
||||
- get the ClusterIP of the exporter with `kubectl get svc httplat`
|
||||
|
||||
- `curl http://<ClusterIP>:9080/metrics`
|
||||
|
||||
- check that the result includes the `httplat` histogram
|
||||
|
||||
- Make sure that Prometheus is scraping the exporter:
|
||||
|
||||
- go to `Status` / `Targets` in Prometheus
|
||||
|
||||
- make sure that `httplat` shows up in there
|
||||
|
||||
---
|
||||
|
||||
## Installing KEDA
|
||||
|
||||
- Multiple possibilities, as explained in the [documentation](https://keda.sh/docs/2.12/deploy/)
|
||||
|
||||
- For simplicity we can use the YAML version with admission webhooks
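For instance, a sketch of that YAML-based install (the version number here is an assumption; check the releases page for the current one):

```bash
kubectl apply --server-side -f \
    https://github.com/kedacore/keda/releases/download/v2.12.0/keda-2.12.0.yaml
```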
|
||||
|
||||
---
|
||||
|
||||
## Creating a "Scaler"
|
||||
|
||||
- With KEDA, instead of creating an HPA policy directly, we create a "Scaled Object"
|
||||
|
||||
- The "Scaled Object" will take care of:
|
||||
|
||||
  - registering and exposing our custom metric in KEDA's aggregation layer
|
||||
|
||||
- creating the HPA policy that consumes that metric
|
||||
|
||||
- See the [Prometheus Scaler documentation](https://keda.sh/docs/2.12/scalers/prometheus/)
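Here is a minimal sketch of what our ScaledObject could look like (the Prometheus address assumes the Helm install shown earlier; the threshold of 0.1 seconds matches our 100ms latency target):

```yaml
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: rng
spec:
  scaleTargetRef:
    name: rng
  minReplicaCount: 1
  maxReplicaCount: 20
  triggers:
  - type: prometheus
    metadata:
      serverAddress: http://prometheus-server.prometheus.svc:80
      query: rate(httplat_latency_seconds_sum[2m])/rate(httplat_latency_seconds_count[2m])
      threshold: "0.1"
```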
|
||||
|
||||
---
|
||||
|
||||
## Witness the marvel of custom autoscaling
|
||||
|
||||
(Sort of)
|
||||
|
||||
- After a short while, the `rng` Deployment will scale up
|
||||
|
||||
- It should scale up until the latency drops below 100ms
|
||||
|
||||
(and continue to scale up a little bit more after that)
|
||||
|
||||
- Then, since the latency will be well below 100ms, it will scale down
|
||||
|
||||
- ... and back up again, etc.
|
||||
|
||||
(See pictures on next slides!)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## What's going on?
|
||||
|
||||
- The autoscaler's information is slightly out of date
|
||||
|
||||
  (not by much; probably between one and two minutes)
|
||||
|
||||
- It's enough to cause the oscillations that we observe
|
||||
|
||||
- One possible fix is to tell the autoscaler to wait a bit after each action (see the sketch after this list)
|
||||
|
||||
- It will reduce oscillations, but will also slow down its reaction time
|
||||
|
||||
(and therefore, how fast it reacts to a peak of traffic)
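One way to implement that "wait a bit" fix is the `behavior` section of the HPA, which KEDA lets us set on the ScaledObject; here is a sketch (field names follow the KEDA/HPA docs, and the values are just an illustration):

```yaml
spec:
  advanced:
    horizontalPodAutoscalerConfig:
      behavior:
        scaleDown:
          stabilizationWindowSeconds: 300
        scaleUp:
          stabilizationWindowSeconds: 60
```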
|
||||
|
||||
---
|
||||
|
||||
## What's going on? Take 2
|
||||
|
||||
- As soon as the measured latency is *significantly* below our target (100ms) ...
|
||||
|
||||
the autoscaler tries to scale down
|
||||
|
||||
- If the latency is measured at 20ms ...
|
||||
|
||||
the autoscaler will try to *divide the number of pods by five!*
|
||||
|
||||
- One possible solution: apply a formula to the measured latency,
|
||||
so that values between e.g. 10 and 100ms get very close to 100ms.
|
||||
|
||||
- Another solution: instead of targeting a specific latency,
|
||||
target a 95th percentile latency or something similar, using
|
||||
a more advanced PromQL expression (and leveraging the fact that
|
||||
we have histograms instead of raw values).
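Since `httplat` publishes a histogram, that last approach could be a query like this (a sketch; the exact bucket series name is an assumption based on the metric names above):

```
histogram_quantile(0.95, rate(httplat_latency_seconds_bucket[2m]))
```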
|
||||
|
||||
???
|
||||
|
||||
:EN:- Autoscaling with custom metrics
|
||||
:FR:- Suivi de charge avancé (HPAv2)
|
||||
@@ -572,7 +572,7 @@ This is normal: we haven't provided any ingress rule yet.
|
||||
|
||||
- Create a prefix match rule for the `blue` service:
|
||||
```bash
|
||||
kubectl create ingress bluestar --rule=/blue*=blue:80
|
||||
kubectl create ingress bluestar --rule=/blue*:blue:80
|
||||
```
|
||||
|
||||
- Check that it works:
|
||||
|
||||
@@ -128,9 +128,7 @@ configMapGenerator:
|
||||
|
||||
- A *variant* is the final outcome of applying bases + overlays
|
||||
|
||||
(See the [kustomize glossary][glossary] for more definitions!)
|
||||
|
||||
[glossary]: https://kubectl.docs.kubernetes.io/references/kustomize/glossary/
|
||||
(See the [kustomize glossary](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md) for more definitions!)
|
||||
|
||||
---
|
||||
|
||||
@@ -339,7 +337,7 @@ kustomize edit add label app.kubernetes.io/name:dockercoins
|
||||
|
||||
- Assuming that `commonLabels` have been set as shown on the previous slide:
|
||||
```bash
|
||||
kubectl apply -k . --prune --selector app.kubernetes.io/name=dockercoins
|
||||
kubectl apply -k . --prune --selector app.kubernetes.io.name=dockercoins
|
||||
```
|
||||
|
||||
- ... This command removes resources that have been removed from the kustomization
|
||||
|
||||
@@ -536,12 +536,12 @@ Note: the `apiVersion` field appears to be optional.
|
||||
- Excerpt:
|
||||
```yaml
|
||||
generate:
|
||||
kind: LimitRange
|
||||
name: default-limitrange
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
limits:
|
||||
kind: LimitRange
|
||||
name: default-limitrange
|
||||
namespace: "{{request.object.metadata.name}}"
|
||||
data:
|
||||
spec:
|
||||
limits:
|
||||
```
|
||||
|
||||
- Note that we have to specify the `namespace`
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
- "New" policies
|
||||
|
||||
(available in alpha since Kubernetes 1.22, and GA since Kubernetes 1.25)
|
||||
(available in alpha since Kubernetes 1.22)
|
||||
|
||||
- Easier to use
|
||||
|
||||
@@ -66,6 +66,50 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## PSA in practice
|
||||
|
||||
- Step 1: enable the PodSecurity admission plugin
|
||||
|
||||
- Step 2: label some Namespaces
|
||||
|
||||
- Step 3: provide an AdmissionConfiguration (optional)
|
||||
|
||||
- Step 4: profit!
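As a preview of step 2, labeling a Namespace might look like this (a sketch; `dev` is a hypothetical Namespace, and `baseline` is one of the pre-defined profiles):

```bash
kubectl label namespace dev pod-security.kubernetes.io/enforce=baseline
```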
|
||||
|
||||
---
|
||||
|
||||
## Enabling PodSecurity
|
||||
|
||||
- This requires Kubernetes 1.22 or later
|
||||
|
||||
- This requires the ability to reconfigure the API server
|
||||
|
||||
- The following slides assume that we're using `kubeadm`
|
||||
|
||||
(and have write access to `/etc/kubernetes/manifests`)
|
||||
|
||||
---
|
||||
|
||||
## Reconfiguring the API server
|
||||
|
||||
- In Kubernetes 1.22, we need to enable the `PodSecurity` feature gate
|
||||
|
||||
- In later versions, this might be enabled automatically
|
||||
|
||||
.lab[
|
||||
|
||||
- Edit `/etc/kubernetes/manifests/kube-apiserver.yaml`
|
||||
|
||||
- In the `command` list, add `--feature-gates=PodSecurity=true`
|
||||
|
||||
- Save, quit, wait for the API server to be back up again
|
||||
|
||||
]
|
||||
|
||||
Note: for bonus points, edit the `kubeadm-config` ConfigMap instead!
|
||||
|
||||
---
|
||||
|
||||
## Namespace labels
|
||||
|
||||
- Three optional labels can be added to namespaces:
|
||||
@@ -233,6 +277,14 @@ Let's use @@LINK[k8s/admission-configuration.yaml]:
|
||||
|
||||
- But the Pods don't get created
|
||||
|
||||
---
|
||||
|
||||
## Clean up
|
||||
|
||||
- We probably want to remove the API server flags that we added
|
||||
|
||||
(the feature gate and the admission configuration)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Preventing privilege escalation with Pod Security Admission
|
||||
|
||||
@@ -124,7 +124,7 @@
|
||||
|
||||
## Admission plugins
|
||||
|
||||
- [PodSecurityPolicy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) (was removed in Kubernetes 1.25)
|
||||
- [PodSecurityPolicy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) (will be removed in Kubernetes 1.25)
|
||||
|
||||
- create PodSecurityPolicy resources
|
||||
|
||||
@@ -132,7 +132,7 @@
|
||||
|
||||
- create RoleBinding that grants the Role to a user or ServiceAccount
|
||||
|
||||
- [PodSecurityAdmission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) (alpha since Kubernetes 1.22, stable since 1.25)
|
||||
- [PodSecurityAdmission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) (alpha since Kubernetes 1.22)
|
||||
|
||||
- use pre-defined policies (privileged, baseline, restricted)
|
||||
|
||||
@@ -162,31 +162,9 @@
|
||||
|
||||
---
|
||||
|
||||
## Validating Admission Policies
|
||||
|
||||
- Alternative to validating admission webhooks
|
||||
|
||||
- Evaluated in the API server
|
||||
|
||||
(don't require an external server; don't add network latency)
|
||||
|
||||
- Written in CEL (Common Expression Language)
|
||||
|
||||
- alpha in K8S 1.26; beta in K8S 1.28; GA in K8S 1.30
|
||||
|
||||
- Can replace validating webhooks at least in simple cases
|
||||
|
||||
- Can extend Pod Security Admission
|
||||
|
||||
- Check [the documentation][vapdoc] for examples
|
||||
|
||||
[vapdoc]: https://kubernetes.io/docs/reference/access-authn-authz/validating-admission-policy/
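As a sketch of the general shape (adapted from the documentation; a ValidatingAdmissionPolicyBinding is also needed for the policy to take effect):

```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
  name: demo-replica-limit
spec:
  failurePolicy: Fail
  matchConstraints:
    resourceRules:
    - apiGroups: ["apps"]
      apiVersions: ["v1"]
      operations: ["CREATE", "UPDATE"]
      resources: ["deployments"]
  validations:
  - expression: "object.spec.replicas <= 5"
```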
|
||||
|
||||
---
|
||||
|
||||
## Acronym salad
|
||||
|
||||
- PSP = Pod Security Policy **(deprecated)**
|
||||
- PSP = Pod Security Policy
|
||||
|
||||
- an admission plugin called PodSecurityPolicy
|
||||
|
||||
|
||||
@@ -2,15 +2,11 @@
|
||||
|
||||
- "Legacy" policies
|
||||
|
||||
(deprecated since Kubernetes 1.21; removed in 1.25)
|
||||
(deprecated since Kubernetes 1.21; will be removed in 1.25)
|
||||
|
||||
- Superseded by Pod Security Standards + Pod Security Admission
|
||||
|
||||
(available in alpha since Kubernetes 1.22; stable since 1.25)
|
||||
|
||||
- **Since Kubernetes 1.24 was EOL in July 2023, nobody should use PSPs anymore!**
|
||||
|
||||
- This section is here mostly for historical purposes, and can be skipped
|
||||
(available in alpha since Kubernetes 1.22)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
## Pre-requirements
|
||||
# Pre-requirements
|
||||
|
||||
- Kubernetes concepts
|
||||
|
||||
|
||||
@@ -533,193 +533,6 @@ This set of resources makes sure that this service won't be killed (as long as i
|
||||
|
||||
---
|
||||
|
||||
# Defining min, max, and default resources
|
||||
|
||||
- We can create LimitRange objects to indicate any combination of:
|
||||
|
||||
- min and/or max resources allowed per pod
|
||||
|
||||
- default resource *limits*
|
||||
|
||||
- default resource *requests*
|
||||
|
||||
- maximal burst ratio (*limit/request*)
|
||||
|
||||
- LimitRange objects are namespaced
|
||||
|
||||
- They apply to their namespace only
|
||||
|
||||
---
|
||||
|
||||
## LimitRange example
|
||||
|
||||
```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: my-very-detailed-limitrange
spec:
  limits:
  - type: Container
    min:
      cpu: "100m"
    max:
      cpu: "2000m"
      memory: "1Gi"
    default:
      cpu: "500m"
      memory: "250Mi"
    defaultRequest:
      cpu: "500m"
```
|
||||
|
||||
---
|
||||
|
||||
## Example explanation
|
||||
|
||||
The YAML on the previous slide shows an example LimitRange object specifying very detailed limits on CPU usage,
|
||||
and providing defaults on RAM usage.
|
||||
|
||||
Note the `type: Container` line: in the future,
|
||||
it might also be possible to specify limits
|
||||
per Pod, but it's not [officially documented yet](https://github.com/kubernetes/website/issues/9585).
|
||||
|
||||
---
|
||||
|
||||
## LimitRange details
|
||||
|
||||
- LimitRange restrictions are enforced only when a Pod is created
|
||||
|
||||
(they don't apply retroactively)
|
||||
|
||||
- They don't prevent creation of e.g. an invalid Deployment or DaemonSet
|
||||
|
||||
(but the pods will not be created as long as the LimitRange is in effect)
|
||||
|
||||
- If there are multiple LimitRange restrictions, they all apply together
|
||||
|
||||
(which means that it's possible to specify conflicting LimitRanges,
|
||||
<br/>preventing any Pod from being created)
|
||||
|
||||
- If a LimitRange specifies a `max` for a resource but no `default`,
|
||||
<br/>that `max` value becomes the `default` limit too
|
||||
|
||||
---
|
||||
|
||||
# Namespace quotas
|
||||
|
||||
- We can also set quotas per namespace
|
||||
|
||||
- Quotas apply to the total usage in a namespace
|
||||
|
||||
(e.g. total CPU limits of all pods in a given namespace)
|
||||
|
||||
- Quotas can apply to resource limits and/or requests
|
||||
|
||||
(like the CPU and memory limits that we saw earlier)
|
||||
|
||||
- Quotas can also apply to other resources:
|
||||
|
||||
- "extended" resources (like GPUs)
|
||||
|
||||
- storage size
|
||||
|
||||
- number of objects (number of pods, services...)
|
||||
|
||||
---
|
||||
|
||||
## Creating a quota for a namespace
|
||||
|
||||
- Quotas are enforced by creating a ResourceQuota object
|
||||
|
||||
- ResourceQuota objects are namespaced, and apply to their namespace only
|
||||
|
||||
- We can have multiple ResourceQuota objects in the same namespace
|
||||
|
||||
- The most restrictive values are used
|
||||
|
||||
---
|
||||
|
||||
## Limiting total CPU/memory usage
|
||||
|
||||
- The following YAML specifies an upper bound for *limits* and *requests*:
|
||||
  ```yaml
  apiVersion: v1
  kind: ResourceQuota
  metadata:
    name: a-little-bit-of-compute
  spec:
    hard:
      requests.cpu: "10"
      requests.memory: 10Gi
      limits.cpu: "20"
      limits.memory: 20Gi
  ```
|
||||
|
||||
These quotas will apply to the namespace where the ResourceQuota is created.
|
||||
|
||||
---
|
||||
|
||||
## Limiting number of objects
|
||||
|
||||
- The following YAML specifies how many objects of specific types can be created:
|
||||
  ```yaml
  apiVersion: v1
  kind: ResourceQuota
  metadata:
    name: quota-for-objects
  spec:
    hard:
      pods: 100
      services: 10
      secrets: 10
      configmaps: 10
      persistentvolumeclaims: 20
      services.nodeports: 0
      services.loadbalancers: 0
      count/roles.rbac.authorization.k8s.io: 10
  ```
|
||||
|
||||
(The `count/` syntax allows limiting arbitrary objects, including CRDs.)
|
||||
|
||||
---
|
||||
|
||||
## YAML vs CLI
|
||||
|
||||
- Quotas can be created with a YAML definition
|
||||
|
||||
- ...Or with the `kubectl create quota` command
|
||||
|
||||
- Example:
|
||||
  ```bash
  kubectl create quota my-resource-quota --hard=pods=300,limits.memory=300Gi
  ```
|
||||
|
||||
- Whether we use the YAML or the CLI form, the values always go under the `hard` section
|
||||
|
||||
(there is no `soft` quota)
|
||||
|
||||
---
|
||||
|
||||
## Viewing current usage
|
||||
|
||||
When a ResourceQuota is created, we can see how much of it is used:
|
||||
|
||||
```
kubectl describe resourcequota my-resource-quota

Name:                   my-resource-quota
Namespace:              default
Resource                Used  Hard
--------                ----  ----
pods                    12    100
services                1     5
services.loadbalancers  0     0
services.nodeports      0     0
```
|
||||
|
||||
---
|
||||
|
||||
## Advanced quotas and PriorityClass
|
||||
|
||||
- Pods can have a *priority*
|
||||
@@ -754,130 +567,6 @@ services.nodeports 0 0
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## PriorityClass and ResourceQuotas
|
||||
|
||||
- A ResourceQuota can include a list of *scopes* or a *scope selector*
|
||||
|
||||
- In that case, the quota will only apply to the scoped resources
|
||||
|
||||
- Example: limit the resources allocated to "high priority" Pods
|
||||
|
||||
- In that case, make sure that the quota is created in every Namespace
|
||||
|
||||
(or use *admission configuration* to enforce it)
|
||||
|
||||
- See the [resource quotas documentation][quotadocs] for details
|
||||
|
||||
[quotadocs]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#resource-quota-per-priorityclass
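A sketch of such a scoped quota (assuming a PriorityClass named `high` exists in the cluster):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: quota-for-high-priority
spec:
  hard:
    cpu: "10"
    memory: 20Gi
    pods: "10"
  scopeSelector:
    matchExpressions:
    - operator: In
      scopeName: PriorityClass
      values: ["high"]
```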
|
||||
|
||||
---
|
||||
|
||||
# Limiting resources in practice
|
||||
|
||||
- We have at least three mechanisms:
|
||||
|
||||
- requests and limits per Pod
|
||||
|
||||
- LimitRange per namespace
|
||||
|
||||
- ResourceQuota per namespace
|
||||
|
||||
- Let's see one possible strategy to get started with resource limits
|
||||
|
||||
---
|
||||
|
||||
## Set a LimitRange
|
||||
|
||||
- In each namespace, create a LimitRange object
|
||||
|
||||
- Set a small default CPU request and CPU limit
|
||||
|
||||
(e.g. "100m")
|
||||
|
||||
- Set a default memory request and limit depending on your most common workload
|
||||
|
||||
- for Java, Ruby: start with "1G"
|
||||
|
||||
- for Go, Python, PHP, Node: start with "250M"
|
||||
|
||||
- Set upper bounds slightly below your expected node size
|
||||
|
||||
(80-90% of your node size, with at least a 500M memory buffer)
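Putting that strategy in YAML, a starting point could look like this (a sketch assuming 4-core / 4 GB nodes; adjust the values to your workloads and node size):

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: sensible-defaults
spec:
  limits:
  - type: Container
    defaultRequest:
      cpu: "100m"
      memory: "250M"
    default:
      cpu: "100m"
      memory: "250M"
    max:
      cpu: "3"
      memory: "3500M"
```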
|
||||
|
||||
---
|
||||
|
||||
## Set a ResourceQuota
|
||||
|
||||
- In each namespace, create a ResourceQuota object
|
||||
|
||||
- Set generous CPU and memory limits
|
||||
|
||||
(e.g. half the cluster size if the cluster hosts multiple apps)
|
||||
|
||||
- Set generous objects limits
|
||||
|
||||
- these limits should not be here to constrain your users
|
||||
|
||||
- they should catch a runaway process creating many resources
|
||||
|
||||
- example: a custom controller creating many pods
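For instance, a sketch of such a quota (the values assume a fairly large shared cluster; tune them to yours):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: generous-guardrails
spec:
  hard:
    requests.cpu: "100"
    requests.memory: 200Gi
    limits.cpu: "200"
    limits.memory: 400Gi
    pods: "500"
```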
|
||||
|
||||
---
|
||||
|
||||
## Observe, refine, iterate
|
||||
|
||||
- Observe the resource usage of your pods
|
||||
|
||||
(we will see how in the next chapter)
|
||||
|
||||
- Adjust individual pod limits
|
||||
|
||||
- If you see trends: adjust the LimitRange
|
||||
|
||||
(rather than adjusting every individual set of pod limits)
|
||||
|
||||
- Observe the resource usage of your namespaces
|
||||
|
||||
(with `kubectl describe resourcequota ...`)
|
||||
|
||||
- Rinse and repeat regularly
|
||||
|
||||
---
|
||||
|
||||
## Underutilization
|
||||
|
||||
- Remember: when assigning a pod to a node, the scheduler looks at *requests*
|
||||
|
||||
(not at current utilization on the node)
|
||||
|
||||
- If pods request resources but don't use them, this can lead to underutilization
|
||||
|
||||
(because the scheduler will consider that the node is full and can't fit new pods)
|
||||
|
||||
---
|
||||
|
||||
## Viewing a namespace limits and quotas
|
||||
|
||||
- `kubectl describe namespace` will display resource limits and quotas
|
||||
|
||||
.lab[
|
||||
|
||||
- Try it out:
|
||||
  ```bash
  kubectl describe namespace default
  ```
|
||||
|
||||
- View limits and quotas for *all* namespaces:
|
||||
  ```bash
  kubectl describe namespace
  ```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [A Practical Guide to Setting Kubernetes Requests and Limits](http://blog.kubecost.com/blog/requests-and-limits/)
|
||||
|
||||
@@ -352,87 +352,6 @@ class: pic
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Traffic engineering
|
||||
|
||||
- By default, connections to a ClusterIP or a NodePort are load balanced
|
||||
across all the backends of their Service
|
||||
|
||||
- This can incur extra network hops (which add latency)
|
||||
|
||||
- To remove that extra hop, multiple mechanisms are available:
|
||||
|
||||
- `spec.externalTrafficPolicy`
|
||||
|
||||
- `spec.internalTrafficPolicy`
|
||||
|
||||
- [Topology aware routing](https://kubernetes.io/docs/concepts/services-networking/topology-aware-routing/) annotation (beta)
|
||||
|
||||
- `spec.trafficDistribution` (alpha in 1.30, beta in 1.31)
|
||||
|
||||
---
|
||||
|
||||
## `internal / externalTrafficPolicy`
|
||||
|
||||
- Applies respectively to `ClusterIP` and `NodePort` connections
|
||||
|
||||
- Can be set to `Cluster` or `Local`
|
||||
|
||||
- `Cluster`: load balance connections across all backends (default)
|
||||
|
||||
- `Local`: load balance connections to local backends (on the same node)
|
||||
|
||||
- With `Local`, if there is no local backend, the connection will fail!
|
||||
|
||||
(the parameter expresses a "hard rule", not a preference)
|
||||
|
||||
- Example: `externalTrafficPolicy: Local` for Ingress controllers
|
||||
|
||||
(as shown on earlier diagrams)
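A sketch of how that looks on a Service manifest (names are illustrative):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  selector:
    app: ingress-nginx
  ports:
  - port: 80
    targetPort: 80
```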
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Topology aware routing
|
||||
|
||||
- In beta since Kubernetes 1.23
|
||||
|
||||
- Enabled with annotation `service.kubernetes.io/topology-mode=Auto`
|
||||
|
||||
- Relies on the node label `topology.kubernetes.io/zone`
|
||||
|
||||
- Kubernetes service proxy will try to keep connections within a zone
|
||||
|
||||
(connections made by a pod in zone `a` will be sent to pods in zone `a`)
|
||||
|
||||
- ...Except if there are no pods in the zone (then fallback to all zones)
|
||||
|
||||
- This can mess up autoscaling!
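For example, opting a Service in could look like this (a sketch, with a hypothetical Service name):

```bash
kubectl annotate service web service.kubernetes.io/topology-mode=Auto
```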
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `spec.trafficDistribution`
|
||||
|
||||
- [KEP4444, Traffic Distribution for Services][kep4444]
|
||||
|
||||
- In alpha since Kubernetes 1.30, beta since Kubernetes 1.31
|
||||
|
||||
- Should eventually supersede topology aware routing
|
||||
|
||||
- Can be set to `PreferClose` (more values might be supported later)
|
||||
|
||||
- The meaning of `PreferClose` is implementation dependent
|
||||
|
||||
(with kube-proxy, it should work like topology aware routing: stay in a zone)
|
||||
|
||||
[kep4444]: https://github.com/kubernetes/enhancements/issues/4444
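On a Service manifest, this is a single extra field (sketch):

```yaml
spec:
  trafficDistribution: PreferClose
```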
|
||||
|
||||
???
|
||||
|
||||
:EN:- Service types: ClusterIP, NodePort, LoadBalancer
|
||||
|
||||
@@ -144,30 +144,6 @@
|
||||
|
||||
---
|
||||
|
||||
## [Orbstack](https://orbstack.dev/)
|
||||
|
||||
- Mac only
|
||||
|
||||
- Runs Docker containers, Kubernetes, and Linux virtual machines
|
||||
|
||||
- Emphasis on speed and energy usage (battery life)
|
||||
|
||||
- Great support for `ClusterIP` and `LoadBalancer` services
|
||||
|
||||
- Free for personal use; paid product otherwise
|
||||
|
||||
---
|
||||
|
||||
## [Podman Desktop](https://podman-desktop.io/)
|
||||
|
||||
- Available on Linux, Mac, and Windows
|
||||
|
||||
- Free and open-source
|
||||
|
||||
- Doesn't support Kubernetes directly, but [supports KinD](https://podman-desktop.io/docs/kind)
|
||||
|
||||
---
|
||||
|
||||
## [Rancher Desktop](https://rancherdesktop.io/)
|
||||
|
||||
- Available on Linux, Mac, and Windows
|
||||
@@ -182,6 +158,8 @@
|
||||
|
||||
- Emphasis on ease of use (like Docker Desktop)
|
||||
|
||||
- Relatively young product (first release in May 2021)
|
||||
|
||||
- Based on k3s and other proven components
|
||||
|
||||
---
|
||||
|
||||
@@ -166,15 +166,17 @@
|
||||
|
||||
- [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) by Kelsey Hightower
|
||||
|
||||
*step by step guide to install Kubernetes on GCP, with certificates, HA...*
|
||||
- step by step guide to install Kubernetes on Google Cloud
|
||||
|
||||
- covers certificates, high availability ...
|
||||
|
||||
- *“Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster.”*
|
||||
|
||||
- [Deep Dive into Kubernetes Internals for Builders and Operators](https://www.youtube.com/watch?v=3KtEAa7_duA)
|
||||
|
||||
*conference talk setting up a simplified Kubernetes cluster - no security or HA*
|
||||
- conference presentation showing step-by-step control plane setup
|
||||
|
||||
- 🇫🇷[Démystifions les composants internes de Kubernetes](https://www.youtube.com/watch?v=OCMNA0dSAzc)
|
||||
|
||||
*improved version of the previous one, with certs and recent k8s versions*
|
||||
- emphasis on simplicity, not on security and availability
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for Admins and Ops
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- static-pods-exercise
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- k8s/prereqs-advanced.md
|
||||
- shared/handson.md
|
||||
- k8s/architecture.md
|
||||
#- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc-easy.md
|
||||
-
|
||||
- k8s/dmuc-medium.md
|
||||
- k8s/dmuc-hard.md
|
||||
#- k8s/multinode.md
|
||||
#- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
#- k8s/interco.md
|
||||
-
|
||||
- k8s/apilb.md
|
||||
#- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
#- k8s/cloud-controller-manager.md
|
||||
#- k8s/bootstrap.md
|
||||
- k8s/control-plane-auth.md
|
||||
- k8s/pod-security-intro.md
|
||||
- k8s/pod-security-policies.md
|
||||
- k8s/pod-security-admission.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
-
|
||||
#- k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,96 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for administrators
|
||||
and operators
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
- - k8s/prereqs-advanced.md
|
||||
- shared/handson.md
|
||||
- k8s/architecture.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc-easy.md
|
||||
- - k8s/dmuc-medium.md
|
||||
- k8s/dmuc-hard.md
|
||||
#- k8s/multinode.md
|
||||
#- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
#- k8s/interco.md
|
||||
- - k8s/apilb.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
# DAY 2
|
||||
- - k8s/kubercoins.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- - k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
###- k8s/bootstrap.md
|
||||
- k8s/netpol.md
|
||||
- k8s/pod-security-intro.md
|
||||
- k8s/pod-security-policies.md
|
||||
- k8s/pod-security-admission.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/disruptions.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- - k8s/prometheus.md
|
||||
#- k8s/prometheus-stack.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/crd.md
|
||||
- k8s/operators.md
|
||||
- k8s/eck.md
|
||||
###- k8s/operators-design.md
|
||||
###- k8s/operators-example.md
|
||||
# CONCLUSION
|
||||
- - k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
- |
|
||||
# (All content after this slide is bonus material)
|
||||
# EXTRA
|
||||
- - k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/consul.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/volume-claim-templates.md
|
||||
#- k8s/portworx.md
|
||||
- k8s/openebs.md
|
||||
- k8s/stateful-failover.md
|
||||
@@ -1,93 +0,0 @@
|
||||
title: |
|
||||
Advanced
|
||||
Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- #1
|
||||
- k8s/prereqs-advanced.md
|
||||
- shared/handson.md
|
||||
- k8s/architecture.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc-easy.md
|
||||
- #2
|
||||
- k8s/dmuc-medium.md
|
||||
- k8s/dmuc-hard.md
|
||||
#- k8s/multinode.md
|
||||
#- k8s/cni.md
|
||||
#- k8s/interco.md
|
||||
- k8s/cni-internals.md
|
||||
- #3
|
||||
- k8s/apilb.md
|
||||
- k8s/control-plane-auth.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- #4
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
- k8s/ytt.md
|
||||
- #5
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/crd.md
|
||||
- #6
|
||||
- k8s/ingress-tls.md
|
||||
- k8s/ingress-advanced.md
|
||||
#- k8s/ingress-canary.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/cainjector.md
|
||||
- k8s/eck.md
|
||||
- #7
|
||||
- k8s/admission.md
|
||||
- k8s/kyverno.md
|
||||
- #8
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/prometheus-stack.md
|
||||
- k8s/hpa-v2.md
|
||||
- #9
|
||||
- k8s/operators-design.md
|
||||
- k8s/operators-example.md
|
||||
- k8s/kubebuilder.md
|
||||
- k8s/events.md
|
||||
- k8s/finalizers.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/apiserver-deepdive.md
|
||||
#- k8s/record.md
|
||||
- shared/thankyou.md
|
||||
|
||||
@@ -1,136 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/handson.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectl-run.md
|
||||
#- k8s/batch-jobs.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/service-types.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
-
|
||||
- k8s/dashboard.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/openebs.md
|
||||
#- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
#- k8s/scalingdockercoins.md
|
||||
#- shared/hastyconclusions.md
|
||||
#- k8s/daemonset.md
|
||||
#- shared/yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
#- k8s/healthchecks-more.md
|
||||
#- k8s/record.md
|
||||
#- k8s/ingress-tls.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/helm-intro.md
|
||||
#- k8s/helm-chart-format.md
|
||||
#- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-dependencies.md
|
||||
#- k8s/helm-values-schema-validation.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
#- k8s/ytt.md
|
||||
#- k8s/gitlab.md
|
||||
#- k8s/create-chart.md
|
||||
#- k8s/create-more-charts.md
|
||||
#- k8s/netpol.md
|
||||
#- k8s/authn-authz.md
|
||||
#- k8s/user-cert.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/pod-security-intro.md
|
||||
#- k8s/pod-security-policies.md
|
||||
#- k8s/pod-security-admission.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/prometheus-stack.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/consul.md
|
||||
#- k8s/pv-pvc-sc.md
|
||||
#- k8s/volume-claim-templates.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/openebs.md
|
||||
#- k8s/stateful-failover.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/crd.md
|
||||
#- k8s/admission.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/operators-example.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/finalizers.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
-
|
||||
#- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
#- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,91 +0,0 @@
|
||||
title: |
|
||||
Kubernetes 101
|
||||
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
# Bridget-specific; others use logistics.md
|
||||
- logistics-bridget.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/handson.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
# Bridget doesn't go into as much depth with compose
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
#- k8s/kubenet.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- - k8s/kubectl-run.md
|
||||
#- k8s/batch-jobs.md
|
||||
#- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectlexpose.md
|
||||
#- k8s/service-types.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- - k8s/dashboard.md
|
||||
#- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
#- k8s/record.md
|
||||
- - k8s/logs-cli.md
|
||||
# Bridget hasn't added EFK yet
|
||||
#- k8s/logs-centralized.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/helm-intro.md
|
||||
#- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-dependencies.md
|
||||
#- k8s/helm-values-schema-validation.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/ytt.md
|
||||
#- k8s/netpol.md
|
||||
- k8s/whatsnext.md
|
||||
# - k8s/links.md
|
||||
# Bridget-specific
|
||||
- k8s/links-bridget.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,174 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Docker and Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/handson.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
-
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
-
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/service-types.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- shared/yaml.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/namespaces.md
|
||||
-
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
-
|
||||
- k8s/ingress.md
|
||||
- k8s/ingress-advanced.md
|
||||
#- k8s/ingress-canary.md
|
||||
- k8s/ingress-tls.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/cainjector.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
- k8s/gitlab.md
|
||||
- k8s/ytt.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/pod-security-intro.md
|
||||
- k8s/pod-security-policies.md
|
||||
- k8s/pod-security-admission.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
- k8s/build-with-docker.md
|
||||
- k8s/build-with-kaniko.md
|
||||
-
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/consul.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/volume-claim-templates.md
|
||||
- k8s/portworx.md
|
||||
- k8s/openebs.md
|
||||
- k8s/stateful-failover.md
|
||||
-
|
||||
- k8s/gitworkflows.md
|
||||
- k8s/flux.md
|
||||
- k8s/argocd.md
|
||||
-
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/prometheus-stack.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/disruptions.md
|
||||
- k8s/cluster-autoscaler.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/hpa-v2.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/apiserver-deepdive.md
|
||||
- k8s/crd.md
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/admission.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- k8s/operators-example.md
|
||||
- k8s/kubebuilder.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/kyverno.md
|
||||
- k8s/eck.md
|
||||
- k8s/finalizers.md
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/events.md
|
||||
-
|
||||
- k8s/dmuc-easy.md
|
||||
- k8s/dmuc-medium.md
|
||||
- k8s/dmuc-hard.md
|
||||
#- k8s/multinode.md
|
||||
#- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/apilb.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
-
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,136 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/handson.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/service-types.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- shared/yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-advanced.md
|
||||
#- k8s/ingress-canary.md
|
||||
#- k8s/ingress-tls.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
#- k8s/ytt.md
|
||||
- k8s/gitlab.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/pod-security-intro.md
|
||||
#- k8s/pod-security-policies.md
|
||||
#- k8s/pod-security-admission.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/prometheus-stack.md
|
||||
-
|
||||
- k8s/statefulsets.md
|
||||
- k8s/consul.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/volume-claim-templates.md
|
||||
#- k8s/portworx.md
|
||||
- k8s/openebs.md
|
||||
- k8s/stateful-failover.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/admission.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/operators-example.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
slides/kube.yml
@@ -1,11 +1,13 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Advanced Kubernetes
|
||||
|
||||
chat: "[Mattermost](https://formintra.enix.io/mattermost)"
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "[Slack](https://ap-guest.slack.com/archives/C88FPJY23)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2024-10-formintra.container.training/
|
||||
slides: https://2023-12-demonware.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
@@ -17,76 +19,68 @@ content:
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/prereqs.md
|
||||
- shared/handson.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/yaml-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- shared/toc.md
|
||||
- # 1
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/handson.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/service-types.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- # 2
|
||||
- shared/yaml.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/namespaces.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/demo-apps.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/architecture.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
- exercises/yaml-details.md
|
||||
- exercises/localcluster-details.md
|
||||
- # 3
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/healthchecks-details.md
|
||||
- # 4
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
#- k8s/ingress-advanced.md
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/dmuc-easy.md
|
||||
- exercises/rbac-details.md
|
||||
-
|
||||
- k8s/dmuc-medium.md
|
||||
- k8s/dmuc-hard.md
|
||||
#- k8s/multinode.md
|
||||
#- k8s/cni.md
|
||||
- k8s/apilb.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/control-plane-auth.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- exercises/polykuberbac-details.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/crd.md
|
||||
- k8s/operators.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/operators-design.md
|
||||
- k8s/operators-example.md
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/finalizers.md
|
||||
- k8s/events.md
|
||||
- exercises/sealed-secrets-details.md
|
||||
-
|
||||
- k8s/admission.md
|
||||
- k8s/kyverno.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/cainjector.md
|
||||
- k8s/resource-limits.md
|
||||
- exercises/kyverno-ingress-domain-name-details.md
|
||||
-
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/disruptions.md
|
||||
- k8s/cluster-autoscaler.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/hpa-v2-keda.md
|
||||
- shared/thankyou.md
|
||||
-
|
||||
- |
|
||||
# (Extra material)
|
||||
- k8s/apiserver-deepdive.md
|
||||
- k8s/ingress.md
|
||||
- k8s/ingress-advanced.md
|
||||
#- k8s/ingress-canary.md
|
||||
- k8s/ingress-tls.md
|
||||
- shared/thankyou.md
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
## Introductions (en 🇫🇷)
|
||||
|
||||
- Bonjour !
|
||||
|
||||
- Sur scène : Julien
|
||||
|
||||
- En backstage : Alexandre, Antoine, Aurélien (x2), Benji, David, Kostas, Nicolas, Paul, Sébastien, Thibault...
|
||||
|
||||
- Horaires : tous les jours de 9h à 13h
|
||||
|
||||
- On fera une pause vers (environ) 11h
|
||||
|
||||
- N'hésitez pas à poser un maximum de questions!
|
||||
|
||||
- Utilisez @@CHAT@@ pour les questions, demander de l'aide, etc.
|
||||
|
||||
[@alexbuisine]: https://twitter.com/alexbuisine
|
||||
[EphemeraSearch]: https://ephemerasearch.com/
|
||||
[@jpetazzo]: https://twitter.com/jpetazzo
|
||||
[@jpetazzo@hachyderm.io]: https://hachyderm.io/@jpetazzo
|
||||
[@s0ulshake]: https://twitter.com/s0ulshake
|
||||
[Quantgene]: https://www.quantgene.com/
|
||||
|
||||
---
|
||||
|
||||
## Les 15 minutes du matin
|
||||
|
||||
- Chaque jour, on commencera à 9h par une mini-présentation de 15 minutes
|
||||
|
||||
(sur un sujet choisi ensemble, pas forcément en relation avec la formation!)
|
||||
|
||||
- L'occasion de s'échauffer les neurones avec 🥐/☕️/🍊
|
||||
|
||||
(avant d'attaquer les choses sérieuses)
|
||||
|
||||
- Puis à 9h15 on rentre dans le vif du sujet
|
||||
|
||||
---
|
||||
|
||||
## Travaux pratiques
|
||||
|
||||
- À la fin de chaque matinée, il y a un exercice pratique concret
|
||||
|
||||
(pour mettre en œuvre ce qu'on a vu)
|
||||
|
||||
- Les exercices font partie de la formation !
|
||||
|
||||
- Ils sont prévus pour prendre entre 15 minutes et 2 heures
|
||||
|
||||
(selon les connaissances et l'aisance de chacun·e)
|
||||
|
||||
- Chaque matinée commencera avec un passage en revue de l'exercice de la veille
|
||||
|
||||
- On est là pour vous aider si vous bloquez sur un exercice !
|
||||
|
||||
---
|
||||
|
||||
## Allô Docker¹ ?
|
||||
|
||||
- Chaque après-midi : une heure de questions/réponses ouvertes !
|
||||
|
||||
(sauf le vendredi)
|
||||
|
||||
- Mardi: 15h-16h
|
||||
|
||||
- Mercredi: 16h-17h
|
||||
|
||||
- Jeudi: 14h-15h
|
||||
|
||||
- Sur [Jitsi][jitsi] (lien "visioconf" sur le portail de formation)
|
||||
|
||||
.footnote[¹Clin d'œil à l'excellent ["Quoi de neuf Docker?"][qdnd] de l'excellent [Nicolas Deloof][ndeloof] 🙂]
|
||||
|
||||
[qdnd]: https://www.youtube.com/channel/UCOAhkxpryr_BKybt9wIw-NQ
|
||||
[ndeloof]: https://github.com/ndeloof
|
||||
[jitsi]: https://training.enix.io/jitsi-magic/jitsi.container.training/AlloDockerMai2023
|
||||
@@ -1,76 +0,0 @@
|
||||
## Introductions (en 🇫🇷)
|
||||
|
||||
- Bonjour !
|
||||
|
||||
- Sur scène : Ludovic
|
||||
|
||||
- En backstage : Alexandre, Antoine, Aurélien (x2), Benjamin (x2), David, Kostas, Nicolas, Paul, Sébastien, Thibault...
|
||||
|
||||
- Horaires : tous les jours de 9h à 13h
|
||||
|
||||
- On fera une pause vers (environ) 11h
|
||||
|
||||
- N'hésitez pas à poser un maximum de questions!
|
||||
|
||||
- Utilisez @@CHAT@@ pour les questions, demander de l'aide, etc.
|
||||
|
||||
[@alexbuisine]: https://twitter.com/alexbuisine
|
||||
[EphemeraSearch]: https://ephemerasearch.com/
|
||||
[@jpetazzo]: https://twitter.com/jpetazzo
|
||||
[@jpetazzo@hachyderm.io]: https://hachyderm.io/@jpetazzo
|
||||
[@s0ulshake]: https://twitter.com/s0ulshake
|
||||
[Quantgene]: https://www.quantgene.com/
|
||||
|
||||
---
|
||||
|
||||
## Les 15 minutes du matin
|
||||
|
||||
- Chaque jour, on commencera à 9h par une mini-présentation de 15 minutes
|
||||
|
||||
(sur un sujet choisi ensemble, pas forcément en relation avec la formation!)
|
||||
|
||||
- L'occasion de s'échauffer les neurones avec 🥐/☕️/🍊
|
||||
|
||||
(avant d'attaquer les choses sérieuses)
|
||||
|
||||
- Puis à 9h15 on rentre dans le vif du sujet
|
||||
|
||||
---
|
||||
|
||||
## Travaux pratiques
|
||||
|
||||
- À la fin de chaque matinée, il y a un exercice pratique concret
|
||||
|
||||
(pour mettre en œuvre ce qu'on a vu)
|
||||
|
||||
- Les exercices font partie de la formation !
|
||||
|
||||
- Ils sont prévus pour prendre entre 15 minutes et 2 heures
|
||||
|
||||
(selon les connaissances et l'aisance de chacun·e)
|
||||
|
||||
- Chaque matinée commencera avec un passage en revue de l'exercice de la veille
|
||||
|
||||
- On est là pour vous aider si vous bloquez sur un exercice !
|
||||
|
||||
---
|
||||
|
||||
## Allô Docker¹ ?
|
||||
|
||||
- Chaque après-midi : une heure de questions/réponses ouvertes !
|
||||
|
||||
(sauf le vendredi)
|
||||
|
||||
- Mardi: 15h-16h
|
||||
|
||||
- Mercredi: 16h-17h
|
||||
|
||||
- Jeudi: 17h-18h
|
||||
|
||||
- Sur [Jitsi][jitsi] (lien "visioconf" sur le portail de formation)
|
||||
|
||||
.footnote[¹Clin d'œil à l'excellent ["Quoi de neuf Docker?"][qdnd] de l'excellent [Nicolas Deloof][ndeloof] 🙂]
|
||||
|
||||
[qdnd]: https://www.youtube.com/channel/UCOAhkxpryr_BKybt9wIw-NQ
|
||||
[ndeloof]: https://github.com/ndeloof
|
||||
[jitsi]: https://training.enix.io/jitsi-magic/jitsi.container.training/AlloDockerMai2024
|
||||
@@ -1,16 +1,18 @@
|
||||
## Introductions (en 🇫🇷)
|
||||
## Introductions
|
||||
|
||||
- Bonjour !
|
||||
- Hello! I'm Jérôme Petazzoni ([@jpetazzo], [@jpetazzo@hachyderm.io], Ardan Labs)
|
||||
|
||||
- Jérôme Petazzoni ([@jpetazzo@hachyderm.io])
|
||||
- The training will run from 8am to noon (Vancouver) / 4pm to 8pm (Dublin)
|
||||
|
||||
- Horaires : tous les jours de 9h à 13h
|
||||
- We'll have regular breaks
|
||||
|
||||
- On fera une pause vers (environ) 11h
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- N'hésitez pas à poser un maximum de questions!
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Utilisez @@CHAT@@ pour les questions, demander de l'aide, etc.
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
<!-- -->
|
||||
|
||||
[@alexbuisine]: https://twitter.com/alexbuisine
|
||||
[EphemeraSearch]: https://ephemerasearch.com/
|
||||
@@ -21,52 +23,16 @@

---

## The morning 15 minutes
## Exercises

- Each day, we'll start at 9am with a 15-minute mini-presentation
- At the end of each day, there is a series of exercises

  (on a topic chosen together, not necessarily related to the training!)
- To make the most out of the training, please try the exercises!

- A chance to warm up the neurons with 🥐/☕️/🍊
  (it will help to practice and memorize the content of the day)

  (before tackling the serious stuff)
- We recommend taking at least one hour to work on the exercises

- Then at 9:15am we dive into the heart of the matter
  (if you understood the content of the day, it will be much faster)

---

## Hands-on exercises

- At the end of each morning, there is a concrete hands-on exercise

  (to put into practice what we've covered)

- The exercises are part of the training!

- They are designed to take between 15 minutes and 2 hours

  (depending on each person's knowledge and comfort level)

- Each morning will start with a review of the previous day's exercise

- We're here to help if you get stuck on an exercise!

---

## Optional ...

- Every afternoon: one hour of Q&A by videoconference

  (except on the last day)

- Monday: 2:30pm-3:30pm

- Tuesday: 3pm-4pm

- Wednesday: 3:30pm-4:30pm

- On [Jitsi][jitsi] (the "visioconf" link on the training portal)

[qdnd]: https://www.youtube.com/channel/UCOAhkxpryr_BKybt9wIw-NQ
[ndeloof]: https://github.com/ndeloof
[jitsi]: https://training.enix.io/jitsi-magic/jitsi.container.training/HighFiveAutomne2024
- Each day will start with a quick review of the exercises of the previous day

@@ -1,4 +1,4 @@
## Pre-requirements
# Pre-requirements

- Be comfortable with the UNIX command line

@@ -1,24 +1,11 @@
class: title
class: title, self-paced

Merci !

![end](images/end.jpg)
Thank you!

---

## Final words...
class: title, in-person

- The training portal stays online after the training

- Don't hesitate to contact us via instant messaging!

- The ENIX VMs stay online for at least one week after the training

  (but not the cloud clusters; those get shut down very quickly)

- Don't forget to fill in the evaluation forms

  (it's not for us, it's a legal requirement 😅)

- Once again, **thank you**!
That's all, folks! <br/> Questions?

![end](images/end.jpg)

@@ -1,72 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual

content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/ipsec.md
- swarm/swarmtools.md
- swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- - swarm/logging.md
- swarm/metrics.md
- swarm/gui.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,71 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual

content:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
#- swarm/hostingregistry.md
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md
#- swarm/healthchecks.md
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/ipsec.md
#- swarm/swarmtools.md
- swarm/security.md
#- swarm/secrets.md
#- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
#- swarm/stateful.md
#- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,80 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person
- btp-auto

content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
#- shared/chat-room-im.md
#- shared/chat-room-slack.md
#- shared/chat-room-zoom-meeting.md
#- shared/chat-room-zoom-webinar.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- swarm/cicd.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
- swarm/netshoot.md
- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
- swarm/logging.md
- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,75 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: https://container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- in-person
- btp-auto

content:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/handson.md
- shared/connecting.md
- swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
- swarm/ourapponswarm.md
- swarm/hostingregistry.md
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/stacks.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
#- swarm/netshoot.md
#- swarm/swarmnbt.md
- swarm/ipsec.md
- swarm/updatingservices.md
- swarm/rollingupdates.md
#- swarm/healthchecks.md
- swarm/nodeinfo.md
- swarm/swarmtools.md
- - swarm/security.md
- swarm/secrets.md
- swarm/encryptionatrest.md
- swarm/leastprivilege.md
- swarm/apiscope.md
#- swarm/logging.md
#- swarm/metrics.md
- swarm/stateful.md
- swarm/extratips.md
- shared/thankyou.md
- swarm/links.md
@@ -1,11 +1,3 @@
# Note: Ngrok doesn't have an "anonymous" mode anymore.
# This means that it requires an authentication token.
# That said, all you need is a free account; so if you're
# doing the labs on admission webhooks and want to try
# this Compose file, I highly recommend that you create
# an Ngrok account and set the NGROK_AUTHTOKEN environment
# variable to your authentication token.

version: "3"

services:
@@ -13,8 +5,6 @@ services:
  ngrok-echo:
    image: ngrok/ngrok
    command: http --log=stdout localhost:3000
    environment:
      - NGROK_AUTHTOKEN
    ports:
      - 3000

@@ -26,8 +16,6 @@ services:
  ngrok-flask:
    image: ngrok/ngrok
    command: http --log=stdout localhost:5000
    environment:
      - NGROK_AUTHTOKEN
    ports:
      - 5000
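
As the header comment above explains, the `ngrok/ngrok` services pick up their token from the `NGROK_AUTHTOKEN` environment variable that Compose passes through. Here is a minimal usage sketch, assuming the file is saved as `docker-compose.yaml` in the current directory and that the token value below is a placeholder for your own free-account token:

```bash
# Export the token from your (free) Ngrok account so Compose can pass it through.
export NGROK_AUTHTOKEN="your-ngrok-token-here"

# Start both tunnels in the background.
docker compose up -d ngrok-echo ngrok-flask

# The public tunnel URLs normally show up in the ngrok log output.
docker compose logs ngrok-echo ngrok-flask | grep -o 'url=[^ ]*'
```

If you prefer not to export the token in your shell, prefixing the command (`NGROK_AUTHTOKEN=... docker compose up -d`) works just as well.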