diff --git a/k8s/consul-2.yaml b/k8s/consul-2.yaml index e683aacd..f042770d 100644 --- a/k8s/consul-2.yaml +++ b/k8s/consul-2.yaml @@ -62,11 +62,8 @@ spec: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - consul + matchLabels: + app: consul topologyKey: kubernetes.io/hostname terminationGracePeriodSeconds: 10 containers: @@ -88,7 +85,4 @@ spec: lifecycle: preStop: exec: - command: - - /bin/sh - - -c - - consul leave + command: [ "sh", "-c", "consul leave" ] diff --git a/k8s/consul-3.yaml b/k8s/consul-3.yaml index af62fe0e..f442ea30 100644 --- a/k8s/consul-3.yaml +++ b/k8s/consul-3.yaml @@ -69,11 +69,8 @@ spec: podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - persistentconsul + matchLabels: + app: consul topologyKey: kubernetes.io/hostname terminationGracePeriodSeconds: 10 containers: @@ -98,7 +95,4 @@ spec: lifecycle: preStop: exec: - command: - - /bin/sh - - -c - - consul leave + command: [ "sh", "-c", "consul leave" ] diff --git a/k8s/openebs-pod.yaml b/k8s/openebs-pod.yaml new file mode 100644 index 00000000..9a38a86f --- /dev/null +++ b/k8s/openebs-pod.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: openebs-local-hostpath-pod +spec: + volumes: + - name: storage + persistentVolumeClaim: + claimName: local-hostpath-pvc + containers: + - name: better + image: alpine + command: + - sh + - -c + - | + while true; do + echo "$(date) [$(hostname)] Kubernetes is better with PVs." 
>> /mnt/storage/greet.txt + sleep $(($RANDOM % 5 + 20)) + done + volumeMounts: + - mountPath: /mnt/storage + name: storage + diff --git a/k8s/volumes-for-consul.yaml b/k8s/volumes-for-consul.yaml index 8d75e8ea..1cab0afa 100644 --- a/k8s/volumes-for-consul.yaml +++ b/k8s/volumes-for-consul.yaml @@ -3,8 +3,6 @@ apiVersion: v1 kind: PersistentVolume metadata: name: consul-node2 - annotations: - node: node2 spec: capacity: storage: 10Gi @@ -26,8 +24,6 @@ apiVersion: v1 kind: PersistentVolume metadata: name: consul-node3 - annotations: - node: node3 spec: capacity: storage: 10Gi @@ -49,8 +45,6 @@ apiVersion: v1 kind: PersistentVolume metadata: name: consul-node4 - annotations: - node: node4 spec: capacity: storage: 10Gi diff --git a/prepare-eks/10_create_cluster.sh b/prepare-eks/10_create_cluster.sh new file mode 100755 index 00000000..e7dc26de --- /dev/null +++ b/prepare-eks/10_create_cluster.sh @@ -0,0 +1,13 @@ +#!/bin/sh +# Create an EKS cluster. +# This is not idempotent (each time you run it, it creates a new cluster). + +eksctl create cluster \ + --node-type=t3.large \ + --nodes-max=10 \ + --alb-ingress-access \ + --asg-access \ + --ssh-access \ + --with-oidc \ + # + diff --git a/prepare-eks/20_create_users.sh b/prepare-eks/20_create_users.sh new file mode 100755 index 00000000..71a470bc --- /dev/null +++ b/prepare-eks/20_create_users.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# For each user listed in "users.txt", create an IAM user. +# Also create AWS API access keys, and store them in "users.keys". +# This is idempotent (you can run it multiple times, it will only +# create the missing users). However, it will not remove users. +# Note that you can remove users from "users.keys" (or even wipe +# that file out entirely) and then this script will delete their +# keys and generate new keys for them (and add the new keys to +# "users.keys".) + +echo "Getting list of existing users ..." 
+aws iam list-users --output json | jq -r .Users[].UserName > users.tmp + +for U in $(cat users.txt); do + if ! grep -qw $U users.tmp; then + echo "Creating user $U..." + aws iam create-user --user-name=$U \ + --tags=Key=container.training,Value=1 + fi + if ! grep -qw $U users.keys; then + echo "Listing keys for user $U..." + KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId) + for KEY in $KEYS; do + echo "Deleting key $KEY for user $U..." + aws iam delete-access-key --user=$U --access-key-id=$KEY + done + echo "Creating access key for user $U..." + aws iam create-access-key --user=$U --output json \ + | jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \ + >> users.keys + fi +done diff --git a/prepare-eks/30_create_or_update_policy.sh b/prepare-eks/30_create_or_update_policy.sh new file mode 100755 index 00000000..9c7d11bc --- /dev/null +++ b/prepare-eks/30_create_or_update_policy.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# Create an IAM policy to authorize users to do "aws eks update-kubeconfig". +# This is idempotent, which allows to update the policy document below if +# you want the users to do other things as well. +# Note that each time you run this script, it will actually create a new +# version of the policy, set that version as the default version, and +# remove all non-default versions. (Because you can only have up to +# 5 versions of a given policy, so you need to clean them up.) +# After running that script, you will want to attach the policy to our +# users (check the other scripts in that directory). 
+ +POLICY_NAME=user.container.training +POLICY_DOC='{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "eks:DescribeCluster" + ], + "Resource": "arn:aws:eks:*", + "Effect": "Allow" + } + ] +}' + +ACCOUNT=$(aws sts get-caller-identity | jq -r .Account) + +aws iam create-policy-version \ + --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \ + --policy-document "$POLICY_DOC" \ + --set-as-default + +# For reference, the command below creates a policy without versioning: +#aws iam create-policy \ +#--policy-name user.container.training \ +#--policy-document "$JSON" + +for VERSION in $( + aws iam list-policy-versions \ + --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \ + --query 'Versions[?!IsDefaultVersion].VersionId' \ + --output text) +do + aws iam delete-policy-version \ + --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \ + --version-id "$VERSION" +done + +# For reference, the command below shows all users using the policy: +#aws iam list-entities-for-policy \ +#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME diff --git a/prepare-eks/40_attach_policy.sh b/prepare-eks/40_attach_policy.sh new file mode 100755 index 00000000..42483ecd --- /dev/null +++ b/prepare-eks/40_attach_policy.sh @@ -0,0 +1,14 @@ +#!/bin/sh +# Attach our user policy to all the users defined in "users.txt". +# This should be idempotent, because attaching the same policy +# to the same user multiple times doesn't do anything. + +ACCOUNT=$(aws sts get-caller-identity | jq -r .Account) +POLICY_NAME=user.container.training + +for U in $(cat users.txt); do + echo "Attaching policy to user $U ..." 
+ aws iam attach-user-policy \ + --user-name $U \ + --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME +done diff --git a/prepare-eks/50_aws_auth.sh b/prepare-eks/50_aws_auth.sh new file mode 100755 index 00000000..f586a30e --- /dev/null +++ b/prepare-eks/50_aws_auth.sh @@ -0,0 +1,24 @@ +#!/bin/sh +# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users. +# Each user defined in "users.txt" will be mapped to a Kubernetes user +# with the same name, and put in the "container.training" group, too. +# This is idempotent. +# WARNING: this will wipe out the mapUsers component of the aws-auth +# ConfigMap, removing all users that aren't in "users.txt". +# It won't touch mapRoles, so it shouldn't break the role mappings +# put in place by EKS. + +ACCOUNT=$(aws sts get-caller-identity | jq -r .Account) + +rm -f users.map +for U in $(cat users.txt); do +echo "\ +- userarn: arn:aws:iam::$ACCOUNT:user/$U + username: $U + groups: [ container.training ]\ +" >> users.map +done + +kubectl create --namespace=kube-system configmap aws-auth \ + --dry-run=client --from-file=mapUsers=users.map -o yaml \ + | kubectl apply -f- diff --git a/prepare-eks/60_setup_rbac_and_ns.sh b/prepare-eks/60_setup_rbac_and_ns.sh new file mode 100755 index 00000000..3d52f2c9 --- /dev/null +++ b/prepare-eks/60_setup_rbac_and_ns.sh @@ -0,0 +1,65 @@ +#!/bin/sh +# Create a shared Kubernetes Namespace ("container-training") as well as +# individual namespaces for every user in "users.txt", and set up a bunch +# of permissions. +# Specifically: +# - each user gets "view" permissions in the "default" Namespace +# - each user gets "edit" permissions in the "container-training" Namespace +# - each user gets permissions to list Nodes and Namespaces +# - each user gets "admin" permissions in their personal Namespace +# Note that since Kubernetes Namespaces can't have dots in their names, +# if a user has dots, dots will be mapped to dashes. 
+# So user "ada.lovelace" will get namespace "ada-lovelace". +# This is kind of idempotent (but will raise a bunch of errors for objects +# that already exist). +# TODO: if this needs to evolve, replace all the "create" operations by +# "apply" operations. But this is good enough for now. + +kubectl create rolebinding --namespace default container.training \ + --group=container.training --clusterrole=view + +kubectl create clusterrole view-nodes \ + --verb=get,list,watch --resource=node +kubectl create clusterrolebinding view-nodes \ + --group=container.training --clusterrole=view-nodes + +kubectl create clusterrole view-namespaces \ + --verb=get,list,watch --resource=namespace +kubectl create clusterrolebinding view-namespaces \ + --group=container.training --clusterrole=view-namespaces + +kubectl create namespace container-training +kubectl create rolebinding --namespace container-training edit \ + --group=container.training --clusterrole=edit + +# Note: API calls to EKS tend to be fairly slow. To optimize things a bit, +# instead of running "kubectl" N times, we generate a bunch of YAML and +# apply it. It will still generate a lot of API calls but it's much faster +# than calling "kubectl" N times. It might be possible to make this even +# faster by generating a "kind: List" (I don't know if this would issue +# a single API calls or multiple ones; TBD!) +for U in $(cat users.txt); do + NS=$(echo $U | tr . -) + cat < /tmp/policy.json +aws iam update-assume-role-policy \ + --role-name $ROLE_NAME \ + --policy-document file:///tmp/policy.json diff --git a/prepare-eks/80_s3_bucket.sh b/prepare-eks/80_s3_bucket.sh new file mode 100755 index 00000000..66539250 --- /dev/null +++ b/prepare-eks/80_s3_bucket.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# Create an S3 bucket with two objects in it: +# - public.txt (world-readable) +# - private.txt (private) +# Also create an IAM policy granting read-only access to the bucket +# (and therefore, to the private object). 
+# Finally, attach the policy to an IAM role (for instance, the role +# created by another script in this directory). +# This isn't idempotent, but it can be made idempotent by replacing the +# "aws iam create-policy" call with "aws iam create-policy-version" and +# a bit of extra elbow grease. (See other scripts in this directory for +# an example). + +ACCOUNT=$(aws sts get-caller-identity | jq -r .Account) +BUCKET=container.training +ROLE_NAME=s3-reader-container-training +POLICY_NAME=s3-reader-container-training +POLICY_DOC=$(envsubst < tags/$TAG/status sep "Deploying tag $TAG" - # Wait for cloudinit to be done + # If this VM image is using cloud-init, + # wait for cloud-init to be done pssh " - while [ ! -f /var/lib/cloud/instance/boot-finished ]; do - sleep 1 - done" + if [ -d /var/lib/cloud ]; then + while [ ! -f /var/lib/cloud/instance/boot-finished ]; do + sleep 1 + done + fi" # Special case for scaleway since it doesn't come with sudo if [ "$INFRACLASS" = "scaleway" ]; then @@ -102,6 +105,12 @@ _cmd_deploy() { sudo apt-get update && sudo apt-get install -y python-yaml" + # If there is no "python" binary, symlink to python3 + #pssh " + #if ! which python; then + # ln -s $(which python3) /usr/local/bin/python + #fi" + # Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses pssh -I tee /tmp/postprep.py >/tmp/pp.out 2>>/tmp/pp.err" /tmp/token && @@ -582,7 +598,7 @@ _cmd_start() { case "$1" in --infra) INFRA=$2; shift 2;; --settings) SETTINGS=$2; shift 2;; - --count) COUNT=$2; shift 2;; + --count) die "Flag --count is deprecated; please use --students instead." ;; --tag) TAG=$2; shift 2;; --students) STUDENTS=$2; shift 2;; *) die "Unrecognized parameter: $1." diff --git a/prepare-vms/lib/infra/linode.sh b/prepare-vms/lib/infra/linode.sh new file mode 100644 index 00000000..8a7630d3 --- /dev/null +++ b/prepare-vms/lib/infra/linode.sh @@ -0,0 +1,58 @@ +if ! 
command -v linode-cli >/dev/null; then + warn "Linode CLI (linode-cli) not found." +fi +if ! [ -f ~/.config/linode-cli ]; then + warn "~/.config/linode-cli not found." +fi + +# To view available regions: "linode-cli regions list" +LINODE_REGION=${LINODE_REGION-us-west} + +# To view available types: "linode-cli linodes types" +LINODE_TYPE=${LINODE_TYPE-g6-standard-2} + +infra_list() { + linode-cli linodes list --json | + jq -r '.[] | [.id, .label, .status, .type] | @tsv' +} + +infra_start() { + COUNT=$1 + + for I in $(seq 1 $COUNT); do + NAME=$(printf "%s-%03d" $TAG $I) + sep "Starting instance $I/$COUNT" + info " Zone: $LINODE_REGION" + info " Name: $NAME" + info " Instance type: $LINODE_TYPE" + ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)" + linode-cli linodes create \ + --type=${LINODE_TYPE} --region=${LINODE_REGION} \ + --image=linode/ubuntu18.04 \ + --authorized_keys="${LINODE_SSHKEY}" \ + --root_pass="${ROOT_PASS}" \ + --tags=${TAG} --label=${NAME} + done + sep + + linode_get_ips_by_tag $TAG > tags/$TAG/ips.txt +} + +infra_stop() { + info "Counting instances..." + linode_get_ids_by_tag $TAG | wc -l + info "Deleting instances..." 
+ linode_get_ids_by_tag $TAG | + xargs -n1 -P10 \ + linode-cli linodes delete +} + +linode_get_ids_by_tag() { + TAG=$1 + linode-cli linodes list --tags $TAG --json | jq -r ".[].id" +} + +linode_get_ips_by_tag() { + TAG=$1 + linode-cli linodes list --tags $TAG --json | jq -r ".[].ipv4[0]" +} diff --git a/prepare-vms/lib/pssh.sh b/prepare-vms/lib/pssh.sh index ca3bc639..fb855696 100644 --- a/prepare-vms/lib/pssh.sh +++ b/prepare-vms/lib/pssh.sh @@ -18,11 +18,11 @@ pssh() { echo "[parallel-ssh] $@" export PSSH=$(which pssh || which parallel-ssh) - if [ "$INFRACLASS" = hetzner ]; then - LOGIN=root - else - LOGIN=ubuntu - fi + case "$INFRACLASS" in + hetzner) LOGIN=root ;; + linode) LOGIN=root ;; + *) LOGIN=ubuntu ;; + esac $PSSH -h $HOSTFILE -l $LOGIN \ --par 100 \ diff --git a/slides/containers/Compose_For_Dev_Stacks.md b/slides/containers/Compose_For_Dev_Stacks.md index 4f660881..d978c6f5 100644 --- a/slides/containers/Compose_For_Dev_Stacks.md +++ b/slides/containers/Compose_For_Dev_Stacks.md @@ -329,4 +329,4 @@ This is ideal to debug regressions, do side-by-side comparisons, etc. :EN:- Connecting services together with a *Compose file* :FR:- Utiliser Compose pour décrire son environnement -:FR:- Écrire un *Compose file* pour connecter les services entre eux \ No newline at end of file +:FR:- Écrire un *Compose file* pour connecter les services entre eux diff --git a/slides/containers/Container_Network_Model.md b/slides/containers/Container_Network_Model.md index e39349b8..bbe4c45a 100644 --- a/slides/containers/Container_Network_Model.md +++ b/slides/containers/Container_Network_Model.md @@ -742,3 +742,15 @@ class: extra-details * This may be used to access an internal package repository. (But try to use a multi-stage build instead, if possible!) + +??? 
+ +:EN:Container networking essentials +:EN:- The Container Network Model +:EN:- Container isolation +:EN:- Service discovery + +:FR:Mettre ses conteneurs en réseau +:FR:- Le "Container Network Model" +:FR:- Isolation des conteneurs +:FR:- *Service discovery* diff --git a/slides/containers/Container_Networking_Basics.md b/slides/containers/Container_Networking_Basics.md index 4ce0fc49..23dc8eb9 100644 --- a/slides/containers/Container_Networking_Basics.md +++ b/slides/containers/Container_Networking_Basics.md @@ -229,10 +229,5 @@ containers together without exposing their ports. ??? -:EN:Connecting containers -:EN:- Container networking basics -:EN:- Exposing a container - -:FR:Connecter les conteneurs -:FR:- Description du modèle réseau des conteneurs -:FR:- Exposer un conteneur +:EN:- Exposing single containers +:FR:- Exposer un conteneur isolé diff --git a/slides/containers/Copying_Files_During_Build.md b/slides/containers/Copying_Files_During_Build.md index 2d58d287..a57386df 100644 --- a/slides/containers/Copying_Files_During_Build.md +++ b/slides/containers/Copying_Files_During_Build.md @@ -101,5 +101,5 @@ Success! ??? -:EN:- The build cache +:EN:- Leveraging the build cache for faster builds :FR:- Tirer parti du cache afin d'optimiser la vitesse de *build* diff --git a/slides/containers/Dockerfile_Tips.md b/slides/containers/Dockerfile_Tips.md index 485df970..6b402906 100644 --- a/slides/containers/Dockerfile_Tips.md +++ b/slides/containers/Dockerfile_Tips.md @@ -434,5 +434,12 @@ services: ??? 
+:EN:Optimizing images :EN:- Dockerfile tips, tricks, and best practices -:FR:- Bonnes pratiques pour la construction des images +:EN:- Reducing build time +:EN:- Reducing image size + +:FR:Optimiser ses images +:FR:- Bonnes pratiques, trucs et astuces +:FR:- Réduire le temps de build +:FR:- Réduire la taille des images diff --git a/slides/containers/Network_Drivers.md b/slides/containers/Network_Drivers.md index 3d487370..216579b5 100644 --- a/slides/containers/Network_Drivers.md +++ b/slides/containers/Network_Drivers.md @@ -82,3 +82,12 @@ Use cases: * Those containers can communicate over their `lo` interface.
(i.e. one can bind to 127.0.0.1 and the others can connect to it.) +??? + +:EN:Advanced container networking +:EN:- Transparent network access with the "host" driver +:EN:- Sharing is caring with the "container" driver + +:FR:Paramétrage réseau avancé +:FR:- Accès transparent au réseau avec le mode "host" +:FR:- Partage de la pile réseau avece le mode "container" diff --git a/slides/images/control-planes/advanced-control-plane-split-events.svg b/slides/images/control-planes/advanced-control-plane-split-events.svg new file mode 100644 index 00000000..ae30df7d --- /dev/null +++ b/slides/images/control-planes/advanced-control-plane-split-events.svg @@ -0,0 +1,3921 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ADVANCED CONTROL PLANE + WORKERS(SPLIT EVENTS) + + + + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + passive + + + + active + + + + + + + + + + + + + + + + + + + + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... 
+ + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + + + app pod + + + + some combination of VMs,containers, pods ... + APIload balancer + + + + API server + + API server + + API server + + API server + + API server + + API server + + scheduler + + scheduler + + + + controller manager + + controller manager + + + + + + + + etcd + + + + + + + + + + etcd + + + + etcd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + load balancer,DNS RR, etcdserver list ... + + + + etcd (events) + + + + + + + + + + etcd (events) + + + + etcd (events) + + + + + + + + + + + + + + + + + + + + + + + + + + load balancer,DNS RR, etcdserver list ... + + diff --git a/slides/images/control-planes/advanced-control-plane.svg b/slides/images/control-planes/advanced-control-plane.svg new file mode 100644 index 00000000..84ff5350 --- /dev/null +++ b/slides/images/control-planes/advanced-control-plane.svg @@ -0,0 +1,3596 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ADVANCED CONTROL PLANE + WORKERS + + + + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + passive + + + + active + + + + + + + + + + + + + + + + + + + + + + + + + + + + kubelet + + container 
engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + + + app pod + + + + some combination of VMs,containers, pods ... + APIload balancer + + + + API server + + API server + + API server + + API server + + API server + + API server + + scheduler + + scheduler + + + + controller manager + + controller manager + + + + + + + + etcd + + + + + + + + + + etcd + + + + etcd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + load balancer,DNS RR, etcdserver list ... + diff --git a/slides/images/control-planes/managed-kubernetes.svg b/slides/images/control-planes/managed-kubernetes.svg new file mode 100644 index 00000000..30a02615 --- /dev/null +++ b/slides/images/control-planes/managed-kubernetes.svg @@ -0,0 +1,1294 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + MANAGED KUBERNETES + + + + controllermanager + + + + scheduler + + control plane(operated by provider) + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... + + + + + + + + + + + + + + + + + + + + + pod + + + + kubelet + + + + kubelet + + + + + + pod + + + + + + pod + + + + + + pod + + + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + + ? 
+ API server + + + + + + cloudcontrollermanager + etcd + + + diff --git a/slides/images/control-planes/non-dedicated-stacked-nodes.svg b/slides/images/control-planes/non-dedicated-stacked-nodes.svg new file mode 100644 index 00000000..f0bd1033 --- /dev/null +++ b/slides/images/control-planes/non-dedicated-stacked-nodes.svg @@ -0,0 +1,3132 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CONTROL PLANE AND APPS RUNNING ON THE SAME NODES(SHOWN HERE WITH STACKED CONTROL PLANE) + + + + + + + + + + + + + + + + + + API server + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + scheduler + + + + + + + + + + + + + + + + + + + + + + APIload balancer + + + + + + + + + + + passive + + + + + + + + + active + + + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + + + + + + API server + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + scheduler + + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + + + + + + API server + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + scheduler + + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + ... + + + + + + + + + + + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... 
+ + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + diff --git a/slides/images/control-planes/single-control-and-workers.svg b/slides/images/control-planes/single-control-and-workers.svg new file mode 100644 index 00000000..3bec2e53 --- /dev/null +++ b/slides/images/control-planes/single-control-and-workers.svg @@ -0,0 +1,1611 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SINGLE-NODE CONTROL PLANE + WORKERS(DEPLOYED WITH KUBEADM) + + + + + + + + + + + + + + + + + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... + + + + + + + + kubelet + + container engine + + + + + + + + + API server + + + + kubelet + + container engine + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + scheduler + + + + + + + + + other pods... + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + API server + + + diff --git a/slides/images/control-planes/single-node-dev.svg b/slides/images/control-planes/single-node-dev.svg new file mode 100644 index 00000000..828de926 --- /dev/null +++ b/slides/images/control-planes/single-node-dev.svg @@ -0,0 +1,914 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SINGLE-NODE CLUSTER (FOR DEVELOPMENT) + + + API server + + + + controllermanager + + + + scheduler + + VM or container + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... 
+ + + + + + + + etcd + + + + + + + + + + kubelet + + + + + + pod + + + + + containerengine + + + + + + + + + + pod + + + + + + pod + + + diff --git a/slides/images/control-planes/stacked-control-plane.svg b/slides/images/control-planes/stacked-control-plane.svg new file mode 100644 index 00000000..432209b2 --- /dev/null +++ b/slides/images/control-planes/stacked-control-plane.svg @@ -0,0 +1,3940 @@ + + + + + + image/svg+xml + + how-does-k8s-work + + + + + how-does-k8s-work + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + STACKED CONTROL PLANE + WORKERS(DEPLOYED WITH KUBEADM) + + + + + + + + + + + + kubelet + + container engine + + + + + + + + + + + + + + + API server + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + scheduler + + + + + + + + + other pods... + + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + + + + + + API server + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + scheduler + + + + + + + + + other pods... + + + + + + + + + + + + + + + + API server + + + + kubelet + + container engine + + + + + + + + + + + + + + etcd + + + + + + + + + controller manager + + + + + + + + + passive + + + + + + + + + other pods... 
+ + + + + + + + + active + + + + + + + + + + + + + + + + + + + + + + APIload balancer + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + 👩🏼‍💻👨🏾‍💻🤖 + + $ kubectl ... + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + kubelet + + container engine + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + app pod + + + + + + + + + + + + + + app pod + + + + + diff --git a/slides/k8s/access-eks-cluster.md b/slides/k8s/access-eks-cluster.md new file mode 100644 index 00000000..6bd0d845 --- /dev/null +++ b/slides/k8s/access-eks-cluster.md @@ -0,0 +1,104 @@ +## Accessing our EKS cluster + +- We also have a shared EKS cluster + +- With individual IAM users + +- Let's connect to this cluster! + +--- + +## What we need + +- `kubectl` (obviously!) + +- `aws` CLI (recent-ish version) + + (or `aws` CLI + `aws-iam-authenticator` plugin) + +- AWS API access key and secret access key + +- AWS region + +- EKS cluster name + +--- + +## Setting up AWS credentials + +- There are many ways to do this + +- We're going to use environment variables + +- You're welcome to use whatever you like (e.g. 
AWS profiles) + +.exercise[ + +- Set the AWS region, API access key, and secret key: + ```bash + export AWS_DEFAULT_REGION=`us-east-2` + export AWS_ACCESS_KEY_ID=`AKI...` + export AWS_SECRET_ACCESS_KEY=`xyz123...` + ``` + +- Check that the AWS API recognizes us: + ```bash + aws sts get-caller-identity + ``` + +] + +--- + +## Updating our kubeconfig file + +- Now we can use the AWS CLI to: + + - obtain the Kubernetes API address + + - register it in our kubeconfig file + +.exercise[ + +- Update our kubeconfig file: + ```bash + aws eks update-kubeconfig --name `fancy-clustername-1234` + ``` + +- Run some harmless command: + ```bash + kubectl version + ``` + +] + +--- + +## Our resources + +- We have the following permissions: + + - `view` in the `default` namespace + + - `edit` in the `container-training` namespace + + - `admin` in our personal namespace + +- Our personal namespace is our IAM user name + + (but with dots replaced with dashes) + +- For instance, user `ada.lovelace` has namespace `ada-lovelace` + +--- + +## Deploying things + +- Let's deploy DockerCoins in our personal namespace! + +- Expose the Web UI with a `LoadBalancer` service + +??? + +:EN:- Working with an EKS cluster +:FR:- Travailler avec un cluster EKS diff --git a/slides/k8s/accessinternal.md b/slides/k8s/accessinternal.md index a647c38a..d70c882d 100644 --- a/slides/k8s/accessinternal.md +++ b/slides/k8s/accessinternal.md @@ -134,3 +134,17 @@ installed and set up `kubectl` to communicate with your cluster. :EN:- Securely accessing internal services :FR:- Accès sécurisé aux services internes + +:T: Accessing internal services from our local machine + +:Q: What's the advantage of "kubectl port-forward" compared to a NodePort? +:A: It can forward arbitrary protocols +:A: It doesn't require Kubernetes API credentials +:A: It offers deterministic load balancing (instead of random) +:A: ✔️It doesn't expose the service to the public + +:Q: What's the security concept behind "kubectl port-forward"? 
+:A: ✔️We authenticate with the Kubernetes API, and it forwards connections on our behalf +:A: It detects our source IP address, and only allows connections coming from it +:A: It uses end-to-end mTLS (mutual TLS) to authenticate our connections +:A: There is no security (as long as it's running, anyone can connect from anywhere) diff --git a/slides/k8s/authn-authz.md b/slides/k8s/authn-authz.md index b6ae85ca..a1c79131 100644 --- a/slides/k8s/authn-authz.md +++ b/slides/k8s/authn-authz.md @@ -733,17 +733,19 @@ class: extra-details ## Figuring out who can do what -- For auditing purposes, sometimes we want to know who can perform an action +- For auditing purposes, sometimes we want to know who can perform which actions -- There are a few tools to help us with that +- There are a few tools to help us with that, available as `kubectl` plugins: - - [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security + - `kubectl who-can` / [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security - - [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess) + - `kubectl access-matrix` / [Rakkess (Review Access)](https://github.com/corneliusweig/rakkess) by Cornelius Weig -- Both are available as standalone programs, or as plugins for `kubectl` + - `kubectl rbac-lookup` / [RBAC Lookup](https://github.com/FairwindsOps/rbac-lookup) by FairwindsOps - (`kubectl` plugins can be installed and managed with `krew`) +- `kubectl` plugins can be installed and managed with `krew` + +- They can also be installed and executed as standalone programs ??? 
diff --git a/slides/k8s/cert-manager.md b/slides/k8s/cert-manager.md index dee9193f..61c65b10 100644 --- a/slides/k8s/cert-manager.md +++ b/slides/k8s/cert-manager.md @@ -223,6 +223,24 @@ spec: class: extra-details +## Automatic TLS Ingress with annotations + +- It is also possible to annotate Ingress resources for cert-manager + +- If we annotate an Ingress resource with `cert-manager.io/cluster-issuer=xxx`: + + - cert-manager will detect that annotation + + - it will obtain a certificate using the specified ClusterIssuer (`xxx`) + + - it will store the key and certificate in the specified Secret + +- Note: the Ingress still needs the `tls` section with `secretName` and `hosts` + +--- + +class: extra-details + ## Let's Encrypt and nip.io - Let's Encrypt has [rate limits](https://letsencrypt.org/docs/rate-limits/) per domain @@ -242,3 +260,5 @@ class: extra-details :EN:- Obtaining certificates with cert-manager :FR:- Obtenir des certificats avec cert-manager + +:T: Obtaining TLS certificates with cert-manager diff --git a/slides/k8s/concepts-k8s.md b/slides/k8s/concepts-k8s.md index f8d3a0db..adfc0336 100644 --- a/slides/k8s/concepts-k8s.md +++ b/slides/k8s/concepts-k8s.md @@ -220,6 +220,41 @@ class: extra-details --- +class: pic +![](images/control-planes/single-node-dev.svg) + +--- + +class: pic +![](images/control-planes/managed-kubernetes.svg) + +--- + +class: pic +![](images/control-planes/single-control-and-workers.svg) + +--- + +class: pic +![](images/control-planes/stacked-control-plane.svg) + +--- + +class: pic +![](images/control-planes/non-dedicated-stacked-nodes.svg) + +--- + +class: pic +![](images/control-planes/advanced-control-plane.svg) + +--- + +class: pic +![](images/control-planes/advanced-control-plane-split-events.svg) + +--- + class: extra-details ## How many nodes should a cluster have? 
diff --git a/slides/k8s/helm-chart-format.md b/slides/k8s/helm-chart-format.md index c0a5ada0..a3352f3d 100644 --- a/slides/k8s/helm-chart-format.md +++ b/slides/k8s/helm-chart-format.md @@ -40,7 +40,22 @@ - a `Chart.yaml` file, containing metadata (name, version, description ...) -- Let's look at a simple chart, `stable/tomcat` +- Let's look at a simple chart for a basic demo app + +--- + +## Adding the repo + +- If you haven't done it before, you need to add the repo for that chart + +.exercise[ + +- Add the repo that holds the chart for the OWASP Juice Shop: + ```bash + helm repo add juice https://charts.securecodebox.io + ``` + +] --- @@ -50,17 +65,17 @@ .exercise[ -- Download the tarball for `stable/tomcat`: +- Download the tarball for `juice/juice-shop`: ```bash - helm pull stable/tomcat + helm pull juice/juice-shop ``` - (This will create a file named `tomcat-X.Y.Z.tgz`.) + (This will create a file named `juice-shop-X.Y.Z.tgz`.) -- Or, download + untar `stable/tomcat`: +- Or, download + untar `juice/juice-shop`: ```bash - helm pull stable/tomcat --untar + helm pull juice/juice-shop --untar ``` - (This will create a directory named `tomcat`.) + (This will create a directory named `juice-shop`.) 
] @@ -68,13 +83,13 @@ ## Looking at the chart's content -- Let's look at the files and directories in the `tomcat` chart +- Let's look at the files and directories in the `juice-shop` chart .exercise[ - Display the tree structure of the chart we just downloaded: ```bash - tree tomcat + tree juice-shop ``` ] @@ -93,12 +108,11 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml` (using the standard Go template library) - .exercise[ -- Look at the template file for the tomcat Service resource: +- Look at the template file for the Service resource: ```bash - cat tomcat/templates/appsrv-svc.yaml + cat juice-shop/templates/service.yaml ``` ] @@ -190,7 +204,7 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml` - At the top-level of the chart, it's a good idea to have a README -- It will be viewable with e.g. `helm show readme stable/tomcat` +- It will be viewable with e.g. `helm show readme juice/juice-shop` - In the `templates/` directory, we can also have a `NOTES.txt` file diff --git a/slides/k8s/helm-dependencies.md b/slides/k8s/helm-dependencies.md new file mode 100644 index 00000000..9d9d575d --- /dev/null +++ b/slides/k8s/helm-dependencies.md @@ -0,0 +1,338 @@ +# Charts using other charts + +- Helm charts can have *dependencies* on other charts + +- These dependencies will help us to share or reuse components + + (so that we write and maintain less manifests, less templates, less code!) + +- As an example, we will use a community chart for Redis + +- This will help people who write charts, and people who use them + +- ... And potentially remove a lot of code! 
✌️
+
+---
+
+## Redis in DockerCoins
+
+- In the DockerCoins demo app, we have 5 components:
+
+  - 2 internal webservices
+  - 1 worker
+  - 1 public web UI
+  - 1 Redis data store
+
+- Every component is running some custom code, except Redis
+
+- Every component is using a custom image, except Redis
+
+  (which is using the official `redis` image)
+
+- Could we use a standard chart for Redis?
+
+- Yes! Dependencies to the rescue!
+
+---
+
+## Adding our dependency
+
+- First, we will add the dependency to the `Chart.yaml` file
+
+- Then, we will ask Helm to download that dependency
+
+- We will also *lock* the dependency
+
+  (lock it to a specific version, to ensure reproducibility)
+
+---
+
+## Declaring the dependency
+
+- First, let's edit `Chart.yaml`
+
+.exercise[
+
+- In `Chart.yaml`, fill the `dependencies` section:
+  ```yaml
+  dependencies:
+  - name: redis
+    version: 11.0.5
+    repository: https://charts.bitnami.com/bitnami
+    condition: redis.enabled
+  ```
+
+]
+
+Where do those `repository` and `version` come from?
+
+We're assuming here that we did our research,
+or that our resident Helm expert advised us to
+use Bitnami's Redis chart.
+
+---
+
+## Conditions
+
+- The `condition` field gives us a way to enable/disable the dependency:
+  ```yaml
+  condition: redis.enabled
+  ```
+
+- Here, we can disable Redis with the Helm flag `--set redis.enabled=false`
+
+  (or set that value in a `values.yaml` file)
+
+- Of course, this is mostly useful for *optional* dependencies
+
+  (otherwise, the app ends up being broken since it'll miss a component)
+
+---
+
+## Lock & Load!
+
+- After adding the dependency, we ask Helm to pin and download it
+
+.exercise[
+
+- Ask Helm:
+  ```bash
+  helm dependency update
+  ```
+
+  (Or `helm dep up`)
+
+]
+
+- This will create `Chart.lock` and fetch the dependency
+
+---
+
+## What's `Chart.lock`?
+ +- This is a common pattern with dependencies + + (see also: `Gemfile.lock`, `package.json.lock`, and many others) + +- This lets us define loose dependencies in `Chart.yaml` + + (e.g. "version 11.whatever, but below 12") + +- But have the exact version used in `Chart.lock` + +- This ensures reproducible deployments + +- `Chart.lock` can (should!) be added to our source tree + +- `Chart.lock` can (should!) regularly be updated + +--- + +## Loose dependencies + +- Here is an example of loose version requirement: + ```yaml + dependencies: + - name: redis + version: ">=11 <12" + repository: https://charts.bitnami.com/bitnami + ``` + +- This makes sure that we have the most recent version in the 11.x train + +- ... But without upgrading to version 12.x + + (because it might be incompatible) + +--- + +## `build` vs `update` + +- Helm actually offers two commands to manage dependencies: + + `helm dependency build` = fetch dependencies listed in `Chart.lock` + + `helm dependency update` = update `Chart.lock` (and run `build`) + +- When the dependency gets updated, we can/should: + + - `helm dep up` (update `Chart.lock` and fetch new chart) + + - test! + + - if everything is fine, `git add Chart.lock` and commit + +--- + +## Where are my dependencies? + +- Dependencies are downloaded to the `charts/` subdirectory + +- When they're downloaded, they stay in compressed format (`.tgz`) + +- Should we commit them to our code repository? + +- Pros: + + - more resilient to internet/mirror failures/decomissioning + +- Cons: + + - can add a lot of weight to the repo if charts are big or change often + + - this can be solved by extra tools like git-lfs + +--- + +## Dependency tuning + +- DockerCoins expects the `redis` Service to be named `redis` + +- Our Redis chart uses a different Service name by default + +- Service name is `{{ template "redis.fullname" . 
}}-master` + +- `redis.fullname` looks like this: + ``` + {{- define "redis.fullname" -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else -}} + [...] + {{- end }} + {{- end }} + ``` + +- How do we fix this? + +--- + +## Setting dependency variables + +- If we set `fullnameOverride` to `redis`: + + - the `{{ template ... }}` block will output `redis` + + - the Service name will be `redis-master` + +- A parent chart can set values for its dependencies + +- For example, in the parent's `values.yaml`: + + ```yaml + redis: # Name of the dependency + fullnameOverride: redis # Value passed to redis + cluster: # Other values passed to redis + enabled: false + ``` + +- User can also set variables with `--set=` or with `--values=` + +--- + +class: extra-details + +## Passing templates + +- We can even pass template `{{ include "template.name" }}`, but warning: + + - need to be evaluated with the `tpl` function, on the child side + + - evaluated in the context of the child, with no access to parent variables + + + +--- + +## Getting rid of the `-master` + +- Even if we set that `fullnameOverride`, the Service name will be `redis-master` + +- To remove the `-master` suffix, we need to edit the chart itself + +- To edit the Redis chart, we need to *embed* it in our own chart + +- We need to: + + - decompress the chart + + - adjust `Chart.yaml` accordingly + +--- + +## Embedding a dependency + +.exercise[ + +- Decompress the chart: + ```yaml + cd charts + tar zxf redis-*.tgz + cd .. + ``` + +- Edit `Chart.yaml` and update the `dependencies` section: + ```yaml + dependencies: + - name: redis + version: '*' # No need to constraint version, from local files + ``` + +- Run `helm dep update` + +] + +--- + +## Updating the dependency + +- Now we can edit the Service name + + (it should be in `charts/redis/templates/redis-master-svc.yaml`) + +- Then try to deploy the whole chart! 
+ +--- + +## Embedding a dependency multiple times + +- What if we need multiple copies of the same subchart? + + (for instance, if we need two completely different Redis servers) + +- We can declare a dependency multiple times, and specify an `alias`: + ```yaml + dependencies: + - name: redis + version: '*' + alias: querycache + - name: redis + version: '*' + alias: celeryqueue + ``` + +- `.Chart.Name` will be set to the `alias` + +--- + +class: extra-details + +## Compatibility with Helm 2 + +- Chart `apiVersion: v1` is the only version supported by Helm 2 + +- Chart v1 is also supported by Helm 3 + +- Use v1 if you want to be compatible with Helm 2 + +- Instead of `Chart.yaml`, dependencies are defined in `requirements.yaml` + + (and we should commit `requirements.lock` instead of `Chart.lock`) + +??? + +:EN:- Depending on other charts +:EN:- Charts within charts + +:FR:- Dépendances entre charts +:FR:- Un chart peut en cacher un autre diff --git a/slides/k8s/helm-intro.md b/slides/k8s/helm-intro.md index 6edd0b62..c6e02fbb 100644 --- a/slides/k8s/helm-intro.md +++ b/slides/k8s/helm-intro.md @@ -229,71 +229,95 @@ fine for personal and development clusters.) --- -## Managing repositories - -- Let's check what repositories we have, and add the `stable` repo - - (the `stable` repo contains a set of official-ish charts) - -.exercise[ - -- List our repos: - ```bash - helm repo list - ``` - -- Add the `stable` repo: - ```bash - helm repo add stable https://charts.helm.sh/stable - ``` - -] - -Adding a repo can take a few seconds (it downloads the list of charts from the repo). - -It's OK to add a repo that already exists (it will merely update it). 
- ---- - class: extra-details -## Deprecation warning +## How to find charts, the old way -- That "stable" is being deprecated, in favor of a more decentralized approach +- Helm 2 came with one pre-configured repo, the "stable" repo - (each community / company / group / project hosting their own repository) + (located at https://charts.helm.sh/stable) -- We're going to use it here for educational purposes +- Helm 3 doesn't have any pre-configured repo -- But if you're looking for production-grade charts, look elsewhere! +- The "stable" repo mentioned above is now being deprecated - (namely, on the Helm Hub) +- The new approach is to have fully decentralized repos + +- Repos can be indexed in the Artifact Hub + + (which supersedes the Helm Hub) --- -## Search available charts +## How to find charts, the new way -- We can search available charts with `helm search` +- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io) -- We need to specify where to search (only our repos, or Helm Hub) +- Or use `helm search hub ...` from the CLI -- Let's search for all charts mentioning tomcat! +- Let's try to find a Helm chart for something called "OWASP Juice Shop"! + + (it is a famous demo app used in security challenges) + +--- + +## Finding charts from the CLI + +- We can use `helm search hub ` .exercise[ -- Search for tomcat in the repo that we added earlier: +- Look for the OWASP Juice Shop app: ```bash - helm search repo tomcat + helm search hub owasp juice ``` -- Search for tomcat on the Helm Hub: +- Since the URLs are truncated, try with the YAML output: ```bash - helm search hub tomcat + helm search hub owasp juice -o yaml ``` ] -[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server. 
+Then go to → https://artifacthub.io/packages/helm/securecodebox/juice-shop
+
+---
+
+## Finding charts on the web
+
+- We can also use the Artifact Hub search feature
+
+.exercise[
+
+- Go to https://artifacthub.io/
+
+- In the search box on top, enter "owasp juice"
+
+- Click on the "juice-shop" result (not "multi-juicer" or "juicy-ctf")
+
+]
+
+---
+
+## Installing the chart
+
+- Click on the "Install" button, it will show instructions
+
+.exercise[
+
+- First, add the repository for that chart:
+  ```bash
+  helm repo add juice https://charts.securecodebox.io
+  ```
+
+- Then, install the chart:
+  ```bash
+  helm install my-juice-shop juice/juice-shop
+  ```
+
+]
+
+Note: it is also possible to install directly a chart, with `--repo https://...`
 
 ---
 
@@ -301,22 +325,22 @@ class: extra-details
 
 - "Installing a chart" means creating a *release*
 
-- We need to name that release
+- In the previous example, the release was named "my-juice-shop"
 
-  (or use the `--generate-name` to get Helm to generate one for us)
+- We can also use `--generate-name` to ask Helm to generate a name for us
 
 .exercise[
 
-- Install the tomcat chart that we found earlier:
-  ```bash
-  helm install java4ever stable/tomcat
-  ```
-
 - List the releases:
   ```bash
   helm list
   ```
 
+- Check that we have a `my-juice-shop-...` Pod up and running:
+  ```bash
+  kubectl get pods
+  ```
+
 ]
 
 ---
 
@@ -329,13 +353,13 @@ class: extra-details
 
 - The `helm search` command only takes a search string argument
 
-  (e.g. 
`helm search juice-shop`) - With Helm 2, the name is optional: - `helm install stable/tomcat` will automatically generate a name + `helm install juice/juice-shop` will automatically generate a name - `helm install --name java4ever stable/tomcat` will specify a name + `helm install --name my-juice-shop juice/juice-shop` will specify a name --- @@ -349,12 +373,12 @@ class: extra-details - List all the resources created by this release: ```bash - kubectl get all --selector=release=java4ever + kubectl get all --selector=app.kubernetes.io/instance=my-juice-shop ``` ] -Note: this `release` label wasn't added automatically by Helm. +Note: this label wasn't added automatically by Helm.
It is defined in that chart. In other words, not all charts will provide this la
 
@@ -362,11 +386,11 @@ It is defined in that chart. In other words, not all charts will provide this la
 
 ## Configuring a release
 
-- By default, `stable/tomcat` creates a service of type `LoadBalancer`
+- By default, `juice/juice-shop` creates a service of type `ClusterIP`
 
 - We would like to change that to a `NodePort`
 
-- We could use `kubectl edit service java4ever-tomcat`, but ...
+- We could use `kubectl edit service my-juice-shop`, but ...
 
 ... our changes would get overwritten next time we update that chart!
 
@@ -386,14 +410,14 @@ It is defined in that chart. In other words, not all charts will provide this la
 
 .exercise[
 
-- Look at the README for tomcat:
+- Look at the README for the app:
   ```bash
-  helm show readme stable/tomcat
+  helm show readme juice/juice-shop
   ```
 
-- Look at the values and their defaults:
+- Look at the values and their defaults:
   ```bash
-  helm show values stable/tomcat
+  helm show values juice/juice-shop
   ```
 
 ]
 
...

The `readme` may or may not have (accurate) explanations for the values.

- Values can be set when installing a chart, or when upgrading it

-- We are going to update `java4ever` to change the type of the service
+- We are going to update `my-juice-shop` to change the type of the service

.exercise[

-- Update `java4ever`:
+- Update `my-juice-shop`:
  ```bash
-  helm upgrade java4ever stable/tomcat --set service.type=NodePort
+  helm upgrade my-juice-shop juice/juice-shop \
+      --set service.type=NodePort
  ```

]

-Note that we have to specify the chart that we use (`stable/tomcat`),
+Note that we have to specify the chart that we use (`juice/juice-shop`),
even if we just want to update some values.

We can set multiple values.
If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values.

All unspecified values will take the default values defined in the chart.

@@ -430,25 +455,21 @@ All unspecified values will take the default values defined in the chart. 
--- -## Connecting to tomcat +## Connecting to the Juice Shop -- Let's check the tomcat server that we just installed - -- Note: its readiness probe has a 60s delay - - (so it will take 60s after the initial deployment before the service works) +- Let's check the app that we just installed .exercise[ - Check the node port allocated to the service: ```bash - kubectl get service java4ever-tomcat - PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort}) + kubectl get service my-juice-shop + PORT=$(kubectl get service my-juice-shop -o jsonpath={..nodePort}) ``` -- Connect to it, checking the demo app on `/sample/`: +- Connect to it: ```bash - curl localhost:$PORT/sample/ + curl localhost:$PORT/ ``` ] @@ -462,3 +483,17 @@ All unspecified values will take the default values defined in the chart. :FR:- Fonctionnement général de Helm :FR:- Installer des composants via Helm :FR:- Helm 2, Helm 3, et le *Helm Hub* + +:T: Getting started with Helm and its concepts + +:Q: Which comparison is the most adequate? +:A: Helm is a firewall, charts are access lists +:A: ✔️Helm is a package manager, charts are packages +:A: Helm is an artefact repository, charts are artefacts +:A: Helm is a CI/CD platform, charts are CI/CD pipelines + +:Q: What's required to distribute a Helm chart? 
+:A: A Helm commercial license +:A: A Docker registry +:A: An account on the Helm Hub +:A: ✔️An HTTP server diff --git a/slides/k8s/helm-secrets.md b/slides/k8s/helm-secrets.md index 032ad0e4..8d613b54 100644 --- a/slides/k8s/helm-secrets.md +++ b/slides/k8s/helm-secrets.md @@ -12,22 +12,37 @@ --- +## Adding the repo + +- If you haven't done it before, you need to add the repo for that chart + +.exercise[ + +- Add the repo that holds the chart for the OWASP Juice Shop: + ```bash + helm repo add juice https://charts.securecodebox.io + ``` + +] + +--- + ## We need a release - We need to install something with Helm -- Let's use the `stable/tomcat` chart as an example +- Let's use the `juice/juice-shop` chart as an example .exercise[ -- Install a release called `tomcat` with the chart `stable/tomcat`: +- Install a release called `orange` with the chart `juice/juice-shop`: ```bash - helm upgrade tomcat stable/tomcat --install + helm upgrade orange juice/juice-shop --install ``` - Let's upgrade that release, and change a value: ```bash - helm upgrade tomcat stable/tomcat --set ingress.enabled=true + helm upgrade orange juice/juice-shop --set ingress.enabled=true ``` ] @@ -42,7 +57,7 @@ - View the history for that release: ```bash - helm history tomcat + helm history orange ``` ] @@ -82,11 +97,11 @@ We should see a number of secrets with TYPE `helm.sh/release.v1`. .exercise[ -- Examine the secret corresponding to the second release of `tomcat`: +- Examine the secret corresponding to the second release of `orange`: ```bash - kubectl describe secret sh.helm.release.v1.tomcat.v2 + kubectl describe secret sh.helm.release.v1.orange.v2 ``` - (`v1` is the secret format; `v2` means revision 2 of the `tomcat` release) + (`v1` is the secret format; `v2` means revision 2 of the `orange` release) ] @@ -102,7 +117,7 @@ There is a key named `release`. 
- Dump the secret: ```bash - kubectl get secret sh.helm.release.v1.tomcat.v2 \ + kubectl get secret sh.helm.release.v1.orange.v2 \ -o go-template='{{ .data.release }}' ``` @@ -120,7 +135,7 @@ Secrets are encoded in base64. We need to decode that! - Decode the secret: ```bash - kubectl get secret sh.helm.release.v1.tomcat.v2 \ + kubectl get secret sh.helm.release.v1.orange.v2 \ -o go-template='{{ .data.release | base64decode }}' ``` @@ -144,7 +159,7 @@ Let's try one more round of decoding! - Decode it twice: ```bash - kubectl get secret sh.helm.release.v1.tomcat.v2 \ + kubectl get secret sh.helm.release.v1.orange.v2 \ -o go-template='{{ .data.release | base64decode | base64decode }}' ``` @@ -164,7 +179,7 @@ Let's try one more round of decoding! - Pipe the decoded release through `file -`: ```bash - kubectl get secret sh.helm.release.v1.tomcat.v2 \ + kubectl get secret sh.helm.release.v1.orange.v2 \ -o go-template='{{ .data.release | base64decode | base64decode }}' \ | file - ``` @@ -185,7 +200,7 @@ Gzipped data! It can be decoded with `gunzip -c`. - Rerun the previous command, but with `| gunzip -c > release-info` : ```bash - kubectl get secret sh.helm.release.v1.tomcat.v2 \ + kubectl get secret sh.helm.release.v1.orange.v2 \ -o go-template='{{ .data.release | base64decode | base64decode }}' \ | gunzip -c > release-info ``` @@ -211,7 +226,7 @@ If we inspect that JSON (e.g. 
with `jq keys release-info`), we see: - `config` (contains the values that we've set) - `info` (date of deployment, status messages) - `manifest` (YAML generated from the templates) -- `name` (name of the release, so `tomcat`) +- `name` (name of the release, so `orange`) - `namespace` (namespace where we deployed the release) - `version` (revision number within that release; starts at 1) diff --git a/slides/k8s/helm-values-schema-validation.md b/slides/k8s/helm-values-schema-validation.md new file mode 100644 index 00000000..c8f39a2e --- /dev/null +++ b/slides/k8s/helm-values-schema-validation.md @@ -0,0 +1,191 @@ +# Helm and invalid values + +- A lot of Helm charts let us specify an image tag like this: + ```bash + helm install ... --set image.tag=v1.0 + ``` + +- What happens if we make a small mistake, like this: + ```bash + helm install ... --set imagetag=v1.0 + ``` + +- Or even, like this: + ```bash + helm install ... --set image=v1.0 + ``` + +🤔 + +--- + +## Making mistakes + +- In the first case: + + - we set `imagetag=v1.0` instead of `image.tag=v1.0` + + - Helm will ignore that value (if it's not used anywhere in templates) + + - the chart is deployed with the default value instead + +- In the second case: + + - we set `image=v1.0` instead of `image.tag=v1.0` + + - `image` will be a string instead of an object + + - Helm will *probably* fail when trying to evaluate `image.tag` + +--- + +## Preventing mistakes + +- To prevent the first mistake, we need to tell Helm: + + *"let me know if any additional (unknonw) value was set!"* + +- To prevent the second mistake, we need to tell Helm: + + *"`image` should be an object, and `image.tag` should be a string!"* + +- We can do this with *values schema validation* + +--- + +## Helm values schema validation + +- We can write a spec representing the possible values accepted by the chart + +- Helm will check the validity of the values before trying to install/upgrade + +- If it finds problems, it will stop immediately 
+ +- The spec uses [JSON Schema](https://json-schema.org/): + + *JSON Schema is a vocabulary that allows you to annotate and validate JSON documents.* + +- JSON Schema is designed for JSON, but can easily work with YAML too + + (or any language with `map|dict|associativearray` and `list|array|sequence|tuple`) + +--- + +## In practice + +- We need to put the JSON Schema spec in a file called `values.schema.json` + + (at the root of our chart; right next to `values.yaml` etc.) + +- The file is optional + +- We don't need to register or declare it in `Chart.yaml` or anywhere + +- Let's write a schema that will verify that ... + + - `image.repository` is an official image (string without slashes or dots) + + - `image.pullPolicy` can only be `Always`, `Never`, `IfNotPresent` + +--- + +## `values.schema.json` + +```json +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "image": { + "type": "object", + "properties": { + "repository": { + "type": "string", + "pattern": "^[a-z0-9-_]+$" + }, + "pullPolicy": { + "type": "string", + "pattern": "^(Always|Never|IfNotPresent)$" + } + } + } + } +} +``` + +--- + +## Testing our schema + +- Let's try to install a couple releases with that schema! + +.exercise[ + +- Try an invalid `pullPolicy`: + ```bash + helm install broken --set image.pullPolicy=ShallNotPass + ``` + +- Try an invalid value: + ```bash + helm install should-break --set ImAgeTAg=toto + ``` + +] + +- The first one fails, but the second one still passes ... + +- Why? + +--- + +## Bailing out on unkown properties + +- We told Helm what properties (values) were valid + +- We didn't say what to do about additional (unknown) properties! + +- We can fix that with `"additionalProperties": false` + +.exercise[ + +- Edit `values.schema.json` to add `"additionalProperties": false` + ```json + { + "$schema": "http://json-schema.org/schema#", + "type": "object", + "additionalProperties": false, + "properties": { + ... 
+ ``` + +] + +--- + +## Testing with unknown properties + +.exercise[ + +- Try to pass an extra property: + ```bash + helm install should-break --set ImAgeTAg=toto + ``` + +- Try to pass an extra nested property: + ```bash + helm install does-it-work --set image.hello=world + ``` + +] + +The first command should break. + +The second will not. + +`"additionalProperties": false` needs to be specified at each level. + +??? + +:EN:- Helm schema validation +:FR:- Validation de schema Helm diff --git a/slides/k8s/kubenet.md b/slides/k8s/kubenet.md index 86bf2d44..abbc18ca 100644 --- a/slides/k8s/kubenet.md +++ b/slides/k8s/kubenet.md @@ -52,7 +52,7 @@ - There are literally dozens of implementations out there - (15 are listed in the Kubernetes documentation) + (https://github.com/containernetworking/cni/ lists more than 25 plugins) - Pods have level 3 (IP) connectivity, but *services* are level 4 (TCP or UDP) diff --git a/slides/k8s/openebs.md b/slides/k8s/openebs.md new file mode 100644 index 00000000..942b398e --- /dev/null +++ b/slides/k8s/openebs.md @@ -0,0 +1,521 @@ +# OpenEBS + + - [OpenEBS] is a popular open-source storage solution for Kubernetes + + - Uses the concept of "Container Attached Storage" + + (1 volume = 1 dedicated controller pod + a set of replica pods) + + - Supports a wide range of storage engines: + + - LocalPV: local volumes (hostpath or device), no replication + + - Jiva: for lighter workloads with basic cloning/snapshotting + + - cStor: more powerful engine that also supports resizing, RAID, disk pools ... + + - [Mayastor]: newer, even more powerful engine with NVMe and vhost-user support + +[OpenEBS]: https://openebs.io/ + +[Mayastor]: https://github.com/openebs/MayaStor#mayastor + +--- + +class: extra-details + +## What are all these storage engines? 
+ +- LocalPV is great if we want good performance, no replication, easy setup + + (it is similar to the Rancher local path provisioner) + +- Jiva is great if we want replication and easy setup + + (data is stored in containers' filesystems) + +- cStor is more powerful and flexible, but requires more extensive setup + +- Mayastor is designed to achieve extreme performance levels + + (with the right hardware and disks) + +- The OpenEBS documentation has a [good comparison of engines] to help us pick + +[good comparison of engines]: https://docs.openebs.io/docs/next/casengines.html#cstor-vs-jiva-vs-localpv-features-comparison + +--- + +## Installing OpenEBS with Helm + +- The OpenEBS control plane can be installed with Helm + +- It will run as a set of containers on Kubernetes worker nodes + +.exercise[ + + - Install OpenEBS: + ```bash + helm upgrade --install openebs openebs \ + --repo https://openebs.github.io/charts \ + --namespace openebs --create-namespace + ``` +] + +--- + +## Checking what was installed + +- Wait a little bit ... + +.exercise[ + +- Look at the pods in the `openebs` namespace: + ```bash + kubectl get pods --namespace openebs + ``` + +- And the StorageClasses that were created: + ```bash + kubectl get sc + ``` + +] + +--- + +## The default StorageClasses + +- OpenEBS typically creates three default StorageClasses + +- `openebs-jiva-default` provisions 3 replicated Jiva pods per volume + + - data is stored in `/openebs` in the replica pods + - `/openebs` is a localpath volume mapped to `/var/openebs/pvc-...` on the node + +- `openebs-hostpath` uses LocalPV with local directories + + - volumes are hostpath volumes created in `/var/openebs/local` on each node + +- `openebs-device` uses LocalPV with local block devices + + - requires available disks and/or a bit of extra configuration + - the default configuration filters out loop, LVM, MD devices + +--- + +## When do we need custom StorageClasses? 
+ +- To store LocalPV hostpath volumes on a different path on the host + +- To change the number of replicated Jiva pods + +- To use a different Jiva pool + + (i.e. a different path on the host to store the Jiva volumes) + +- To create a cStor pool + +- ... + +--- + +class: extra-details + +## Defining a custom StorageClass + +Example for a LocalPV hostpath class using an extra mount on `/mnt/vol001`: + +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: localpv-hostpath-mntvol001 + annotations: + openebs.io/cas-type: local + cas.openebs.io/config: | + - name: BasePath + value: "/mnt/vol001" + - name: StorageType + value: "hostpath" +provisioner: openebs.io/local +``` + +- `provisioner` needs to be set accordingly +- Storage engine is chosen by specifying the annotation `openebs.io/cas-type` +- Storage engine configuration is set with the annotation `cas.openebs.io/config` + +--- + +## Checking the default hostpath StorageClass + +- Let's inspect the StorageClass that OpenEBS created for us + +.exercise[ + +- Let's look at the OpenEBS LocalPV hostpath StorageClass: + ```bash + kubectl get storageclass openebs-hostpath -o yaml + ``` +] + +--- + +## Create a host path PVC + +- Let's create a Persistent Volume Claim using an explicit StorageClass + +.exercise[ + +```bash +kubectl apply -f - <