mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-03-03 01:40:19 +00:00
Compare commits
26 Commits
2021-02-en
...
2021-04-di
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f67a09c3f1 | ||
|
|
b0dc1c7c3f | ||
|
|
bb1b225026 | ||
|
|
2160aa7f40 | ||
|
|
8f75a4cd7f | ||
|
|
45213a8f2e | ||
|
|
f03aedd024 | ||
|
|
fcfcb127b4 | ||
|
|
5380b2d52a | ||
|
|
cc5da860b9 | ||
|
|
9e9b17f6c9 | ||
|
|
b9ea938157 | ||
|
|
b23aacdce0 | ||
|
|
c3d6e5e660 | ||
|
|
907adf8075 | ||
|
|
dff505ac76 | ||
|
|
df0ffc4d75 | ||
|
|
02278b3748 | ||
|
|
ab959220ba | ||
|
|
b4576e39d0 | ||
|
|
894dafeecb | ||
|
|
366c656d82 | ||
|
|
a60f929232 | ||
|
|
fdc58cafda | ||
|
|
8de186b909 | ||
|
|
b816d075d4 |
13
prepare-eks/10_create_cluster.sh
Executable file
13
prepare-eks/10_create_cluster.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
# Create an EKS cluster.
|
||||
# This is not idempotent (each time you run it, it creates a new cluster).
|
||||
|
||||
eksctl create cluster \
|
||||
--node-type=t3.large \
|
||||
--nodes-max=10 \
|
||||
--alb-ingress-access \
|
||||
--asg-access \
|
||||
--ssh-access \
|
||||
--with-oidc \
|
||||
#
|
||||
|
||||
32
prepare-eks/20_create_users.sh
Executable file
32
prepare-eks/20_create_users.sh
Executable file
@@ -0,0 +1,32 @@
|
||||
#!/bin/sh
|
||||
# For each user listed in "users.txt", create an IAM user.
|
||||
# Also create AWS API access keys, and store them in "users.keys".
|
||||
# This is idempotent (you can run it multiple times, it will only
|
||||
# create the missing users). However, it will not remove users.
|
||||
# Note that you can remove users from "users.keys" (or even wipe
|
||||
# that file out entirely) and then this script will delete their
|
||||
# keys and generate new keys for them (and add the new keys to
|
||||
# "users.keys".)
|
||||
|
||||
echo "Getting list of existing users ..."
|
||||
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
if ! grep -qw $U users.tmp; then
|
||||
echo "Creating user $U..."
|
||||
aws iam create-user --user-name=$U \
|
||||
--tags=Key=container.training,Value=1
|
||||
fi
|
||||
if ! grep -qw $U users.keys; then
|
||||
echo "Listing keys for user $U..."
|
||||
KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId)
|
||||
for KEY in $KEYS; do
|
||||
echo "Deleting key $KEY for user $U..."
|
||||
aws iam delete-access-key --user=$U --access-key-id=$KEY
|
||||
done
|
||||
echo "Creating access key for user $U..."
|
||||
aws iam create-access-key --user=$U --output json \
|
||||
| jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
|
||||
>> users.keys
|
||||
fi
|
||||
done
|
||||
51
prepare-eks/30_create_or_update_policy.sh
Executable file
51
prepare-eks/30_create_or_update_policy.sh
Executable file
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
|
||||
# This is idempotent, which allows to update the policy document below if
|
||||
# you want the users to do other things as well.
|
||||
# Note that each time you run this script, it will actually create a new
|
||||
# version of the policy, set that version as the default version, and
|
||||
# remove all non-default versions. (Because you can only have up to
|
||||
# 5 versions of a given policy, so you need to clean them up.)
|
||||
# After running that script, you will want to attach the policy to our
|
||||
# users (check the other scripts in that directory).
|
||||
|
||||
POLICY_NAME=user.container.training
|
||||
POLICY_DOC='{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"eks:DescribeCluster"
|
||||
],
|
||||
"Resource": "arn:aws:eks:*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}'
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
aws iam create-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--policy-document "$POLICY_DOC" \
|
||||
--set-as-default
|
||||
|
||||
# For reference, the command below creates a policy without versioning:
|
||||
#aws iam create-policy \
|
||||
#--policy-name user.container.training \
|
||||
#--policy-document "$JSON"
|
||||
|
||||
for VERSION in $(
|
||||
aws iam list-policy-versions \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--query 'Versions[?!IsDefaultVersion].VersionId' \
|
||||
--output text)
|
||||
do
|
||||
aws iam delete-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--version-id "$VERSION"
|
||||
done
|
||||
|
||||
# For reference, the command below shows all users using the policy:
|
||||
#aws iam list-entities-for-policy \
|
||||
#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
14
prepare-eks/40_attach_policy.sh
Executable file
14
prepare-eks/40_attach_policy.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
# Attach our user policy to all the users defined in "users.txt".
|
||||
# This should be idempotent, because attaching the same policy
|
||||
# to the same user multiple times doesn't do anything.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
POLICY_NAME=user.container.training
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
echo "Attaching policy to user $U ..."
|
||||
aws iam attach-user-policy \
|
||||
--user-name $U \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
done
|
||||
24
prepare-eks/50_aws_auth.sh
Executable file
24
prepare-eks/50_aws_auth.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh
|
||||
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
|
||||
# Each user defined in "users.txt" will be mapped to a Kubernetes user
|
||||
# with the same name, and put in the "container.training" group, too.
|
||||
# This is idempotent.
|
||||
# WARNING: this will wipe out the mapUsers component of the aws-auth
|
||||
# ConfigMap, removing all users that aren't in "users.txt".
|
||||
# It won't touch mapRoles, so it shouldn't break the role mappings
|
||||
# put in place by EKS.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
rm -f users.map
|
||||
for U in $(cat users.txt); do
|
||||
echo "\
|
||||
- userarn: arn:aws:iam::$ACCOUNT:user/$U
|
||||
username: $U
|
||||
groups: [ container.training ]\
|
||||
" >> users.map
|
||||
done
|
||||
|
||||
kubectl create --namespace=kube-system configmap aws-auth \
|
||||
--dry-run=client --from-file=mapUsers=users.map -o yaml \
|
||||
| kubectl apply -f-
|
||||
65
prepare-eks/60_setup_rbac_and_ns.sh
Executable file
65
prepare-eks/60_setup_rbac_and_ns.sh
Executable file
@@ -0,0 +1,65 @@
|
||||
#!/bin/sh
|
||||
# Create a shared Kubernetes Namespace ("container-training") as well as
|
||||
# individual namespaces for every user in "users.txt", and set up a bunch
|
||||
# of permissions.
|
||||
# Specifically:
|
||||
# - each user gets "view" permissions in the "default" Namespace
|
||||
# - each user gets "edit" permissions in the "container-training" Namespace
|
||||
# - each user gets permissions to list Nodes and Namespaces
|
||||
# - each user gets "admin" permissions in their personal Namespace
|
||||
# Note that since Kubernetes Namespaces can't have dots in their names,
|
||||
# if a user has dots, dots will be mapped to dashes.
|
||||
# So user "ada.lovelace" will get namespace "ada-lovelace".
|
||||
# This is kind of idempotent (but will raise a bunch of errors for objects
|
||||
# that already exist).
|
||||
# TODO: if this needs to evolve, replace all the "create" operations by
|
||||
# "apply" operations. But this is good enough for now.
|
||||
|
||||
kubectl create rolebinding --namespace default container.training \
|
||||
--group=container.training --clusterrole=view
|
||||
|
||||
kubectl create clusterrole view-nodes \
|
||||
--verb=get,list,watch --resource=node
|
||||
kubectl create clusterrolebinding view-nodes \
|
||||
--group=container.training --clusterrole=view-nodes
|
||||
|
||||
kubectl create clusterrole view-namespaces \
|
||||
--verb=get,list,watch --resource=namespace
|
||||
kubectl create clusterrolebinding view-namespaces \
|
||||
--group=container.training --clusterrole=view-namespaces
|
||||
|
||||
kubectl create namespace container-training
|
||||
kubectl create rolebinding --namespace container-training edit \
|
||||
--group=container.training --clusterrole=edit
|
||||
|
||||
# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
|
||||
# instead of running "kubectl" N times, we generate a bunch of YAML and
|
||||
# apply it. It will still generate a lot of API calls but it's much faster
|
||||
# than calling "kubectl" N times. It might be possible to make this even
|
||||
# faster by generating a "kind: List" (I don't know if this would issue
|
||||
# a single API calls or multiple ones; TBD!)
|
||||
for U in $(cat users.txt); do
|
||||
NS=$(echo $U | tr . -)
|
||||
cat <<EOF
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: $NS
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: admin
|
||||
namespace: $NS
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: $U
|
||||
EOF
|
||||
done | kubectl create -f-
|
||||
|
||||
76
prepare-eks/70_oidc.sh
Executable file
76
prepare-eks/70_oidc.sh
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM role to be used by a Kubernetes ServiceAccount.
|
||||
# The role isn't given any permissions yet (this has to be done by
|
||||
# another script in this series), but a properly configured Pod
|
||||
# should still be able to execute "aws sts get-caller-identity"
|
||||
# and confirm that it's using that role.
|
||||
# This requires the cluster to have an attached OIDC provider.
|
||||
# This should be the case if the cluster has been created with
|
||||
# the scripts in this directory; otherwise, this can be done with
|
||||
# the subsequent command, which is idempotent:
|
||||
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
|
||||
# The policy document used below will authorize all ServiceAccounts
|
||||
# in the "container-training" Namespace to use that role.
|
||||
# This script will also annotate the container-training:default
|
||||
# ServiceAccount so that it can use that role.
|
||||
# This script is not quite idempotent: if you want to use a new
|
||||
# trust policy, some work will be required. (You can delete the role,
|
||||
# but that requires detaching the associated policies. There might also
|
||||
# be a way to update the trust policy directly; we didn't investigate this
|
||||
# further at this point.)
|
||||
|
||||
if [ "$1" ]; then
|
||||
CLUSTER="$1"
|
||||
else
|
||||
echo "Please indicate cluster to use. Available clusters:"
|
||||
aws eks list-clusters --output table
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
TRUST_POLICY=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
|
||||
},
|
||||
"Action": "sts:AssumeRoleWithWebIdentity",
|
||||
"Condition": {
|
||||
"StringLike": {
|
||||
"${OIDC}:sub": ["system:serviceaccount:container-training:*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-role \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--assume-role-policy-document "$TRUST_POLICY"
|
||||
|
||||
kubectl annotate serviceaccounts \
|
||||
--namespace container-training default \
|
||||
"eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
|
||||
--overwrite
|
||||
|
||||
exit
|
||||
|
||||
# Here are commands to delete the role:
|
||||
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
|
||||
aws iam delete-role --role-name $ROLE_NAME
|
||||
|
||||
# Merging the policy with the existing policies:
|
||||
{
|
||||
aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
|
||||
echo "$TRUST_POLICY" | jq -r .Statement[]
|
||||
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
|
||||
aws iam update-assume-role-policy \
|
||||
--role-name $ROLE_NAME \
|
||||
--policy-document file:///tmp/policy.json
|
||||
54
prepare-eks/80_s3_bucket.sh
Executable file
54
prepare-eks/80_s3_bucket.sh
Executable file
@@ -0,0 +1,54 @@
|
||||
#!/bin/sh
|
||||
# Create an S3 bucket with two objects in it:
|
||||
# - public.txt (world-readable)
|
||||
# - private.txt (private)
|
||||
# Also create an IAM policy granting read-only access to the bucket
|
||||
# (and therefore, to the private object).
|
||||
# Finally, attach the policy to an IAM role (for instance, the role
|
||||
# created by another script in this directory).
|
||||
# This isn't idempotent, but it can be made idempotent by replacing the
|
||||
# "aws iam create-policy" call with "aws iam create-policy-version" and
|
||||
# a bit of extra elbow grease. (See other scripts in this directory for
|
||||
# an example).
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
BUCKET=container.training
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
POLICY_NAME=s3-reader-container-training
|
||||
POLICY_DOC=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:ListBucket",
|
||||
"s3:GetObject*"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::$BUCKET",
|
||||
"arn:aws:s3:::$BUCKET/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-policy \
|
||||
--policy-name $POLICY_NAME \
|
||||
--policy-doc "$POLICY_DOC"
|
||||
|
||||
aws s3 mb s3://container.training
|
||||
|
||||
echo "this is a public object" \
|
||||
| aws s3 cp - s3://container.training/public.txt \
|
||||
--acl public-read
|
||||
|
||||
echo "this is a private object" \
|
||||
| aws s3 cp - s3://container.training/private.txt \
|
||||
--acl private
|
||||
|
||||
aws iam attach-role-policy \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
50
prepare-eks/users.txt
Normal file
50
prepare-eks/users.txt
Normal file
@@ -0,0 +1,50 @@
|
||||
ada.lovelace
|
||||
adele.goldstine
|
||||
amanda.jones
|
||||
anita.borg
|
||||
ann.kiessling
|
||||
barbara.mcclintock
|
||||
beatrice.worsley
|
||||
bessie.blount
|
||||
betty.holberton
|
||||
beulah.henry
|
||||
carleen.hutchins
|
||||
caroline.herschel
|
||||
dona.bailey
|
||||
dorothy.hodgkin
|
||||
ellen.ochoa
|
||||
edith.clarke
|
||||
elisha.collier
|
||||
elizabeth.feinler
|
||||
emily.davenport
|
||||
erna.hoover
|
||||
frances.spence
|
||||
gertrude.blanch
|
||||
grace.hopper
|
||||
grete.hermann
|
||||
giuliana.tesoro
|
||||
harriet.tubman
|
||||
hedy.lamarr
|
||||
irma.wyman
|
||||
jane.goodall
|
||||
jean.bartik
|
||||
joy.mangano
|
||||
josephine.cochrane
|
||||
katherine.blodgett
|
||||
kathleen.antonelli
|
||||
lynn.conway
|
||||
margaret.hamilton
|
||||
maria.beasley
|
||||
marie.curie
|
||||
marjorie.joyner
|
||||
marlyn.meltzer
|
||||
mary.kies
|
||||
melitta.bentz
|
||||
milly.koss
|
||||
radia.perlman
|
||||
rosalind.franklin
|
||||
ruth.teitelbaum
|
||||
sarah.mather
|
||||
sophie.wilson
|
||||
stephanie.kwolek
|
||||
yvonne.brill
|
||||
@@ -69,11 +69,14 @@ _cmd_deploy() {
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
# If this VM image is using cloud-init,
|
||||
# wait for cloud-init to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
if [ -d /var/lib/cloud ]; then
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi"
|
||||
|
||||
# Special case for scaleway since it doesn't come with sudo
|
||||
if [ "$INFRACLASS" = "scaleway" ]; then
|
||||
@@ -102,6 +105,12 @@ _cmd_deploy() {
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# If there is no "python" binary, symlink to python3
|
||||
#pssh "
|
||||
#if ! which python; then
|
||||
# ln -s $(which python3) /usr/local/bin/python
|
||||
#fi"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
@@ -208,7 +217,14 @@ _cmd_kube() {
|
||||
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
|
||||
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
|
||||
|
||||
# Initialize kube master
|
||||
# Disable swap
|
||||
# (note that this won't survive across node reboots!)
|
||||
if [ "$INFRACLASS" = "linode" ]; then
|
||||
pssh "
|
||||
sudo swapoff -a"
|
||||
fi
|
||||
|
||||
# Initialize kube control plane
|
||||
pssh --timeout 200 "
|
||||
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token &&
|
||||
|
||||
58
prepare-vms/lib/infra/linode.sh
Normal file
58
prepare-vms/lib/infra/linode.sh
Normal file
@@ -0,0 +1,58 @@
|
||||
if ! command -v linode-cli >/dev/null; then
|
||||
warn "Linode CLI (linode-cli) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/linode-cli ]; then
|
||||
warn "~/.config/linode-cli not found."
|
||||
fi
|
||||
|
||||
# To view available regions: "linode-cli regions list"
|
||||
LINODE_REGION=${LINODE_REGION-us-west}
|
||||
|
||||
# To view available types: "linode-cli linodes types"
|
||||
LINODE_TYPE=${LINODE_TYPE-g6-standard-2}
|
||||
|
||||
infra_list() {
|
||||
linode-cli linodes list --json |
|
||||
jq -r '.[] | [.id, .label, .status, .type] | @tsv'
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
for I in $(seq 1 $COUNT); do
|
||||
NAME=$(printf "%s-%03d" $TAG $I)
|
||||
sep "Starting instance $I/$COUNT"
|
||||
info " Zone: $LINODE_REGION"
|
||||
info " Name: $NAME"
|
||||
info " Instance type: $LINODE_TYPE"
|
||||
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
|
||||
linode-cli linodes create \
|
||||
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
|
||||
--image=linode/ubuntu18.04 \
|
||||
--authorized_keys="${LINODE_SSHKEY}" \
|
||||
--root_pass="${ROOT_PASS}" \
|
||||
--tags=${TAG} --label=${NAME}
|
||||
done
|
||||
sep
|
||||
|
||||
linode_get_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
info "Counting instances..."
|
||||
linode_get_ids_by_tag $TAG | wc -l
|
||||
info "Deleting instances..."
|
||||
linode_get_ids_by_tag $TAG |
|
||||
xargs -n1 -P10 \
|
||||
linode-cli linodes delete
|
||||
}
|
||||
|
||||
linode_get_ids_by_tag() {
|
||||
TAG=$1
|
||||
linode-cli linodes list --tags $TAG --json | jq -r ".[].id"
|
||||
}
|
||||
|
||||
linode_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
linode-cli linodes list --tags $TAG --json | jq -r ".[].ipv4[0]"
|
||||
}
|
||||
@@ -18,11 +18,11 @@ pssh() {
|
||||
echo "[parallel-ssh] $@"
|
||||
export PSSH=$(which pssh || which parallel-ssh)
|
||||
|
||||
if [ "$INFRACLASS" = hetzner ]; then
|
||||
LOGIN=root
|
||||
else
|
||||
LOGIN=ubuntu
|
||||
fi
|
||||
case "$INFRACLASS" in
|
||||
hetzner) LOGIN=root ;;
|
||||
linode) LOGIN=root ;;
|
||||
*) LOGIN=ubuntu ;;
|
||||
esac
|
||||
|
||||
$PSSH -h $HOSTFILE -l $LOGIN \
|
||||
--par 100 \
|
||||
|
||||
@@ -26,16 +26,16 @@ apiurl = "https://dns.api.gandi.net/api/v5/domains"
|
||||
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
|
||||
|
||||
# Figure out if we're called for a bunch of domains, or just one.
|
||||
first_arg = sys.argv[1]
|
||||
if os.path.isfile(first_arg):
|
||||
domains = open(first_arg).read().split()
|
||||
domain_or_domain_file = sys.argv[1]
|
||||
if os.path.isfile(domain_or_domain_file):
|
||||
domains = open(domain_or_domain_file).read().split()
|
||||
domains = [ d for d in domains if not d.startswith('#') ]
|
||||
tag = sys.argv[2]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
else:
|
||||
domains = [first_arg]
|
||||
domains = [domain_or_domain_file]
|
||||
ips = sys.argv[2:]
|
||||
clustersize = len(ips)
|
||||
|
||||
|
||||
69
slides/1.yml
69
slides/1.yml
@@ -1,69 +0,0 @@
|
||||
title: |
|
||||
Docker Intensif
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202102-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-02-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- # DAY 2
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
-
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- # DAY 3
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
-
|
||||
- containers/Orchestration_Overview.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
- shared/thankyou.md
|
||||
#- containers/links.md
|
||||
103
slides/2.yml
103
slides/2.yml
@@ -1,103 +0,0 @@
|
||||
title: |
|
||||
Fondamentaux Kubernetes
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202102-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-02-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- shared/toc.md
|
||||
- # 1
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- # 2
|
||||
- k8s/kubectl-run.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- # 3
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- # 4
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- # 5
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- # 6
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- # 7
|
||||
- k8s/ingress.md
|
||||
- k8s/ingress-tls.md
|
||||
- # 8
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/batch-jobs.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
#- k8s/whatsnext.md
|
||||
#- k8s/lastwords.md
|
||||
- shared/thankyou.md
|
||||
- k8s/links.md
|
||||
#-
|
||||
# - |
|
||||
# # (Bonus)
|
||||
# - k8s/record.md
|
||||
# - k8s/dryrun.md
|
||||
50
slides/4.yml
50
slides/4.yml
@@ -1,50 +0,0 @@
|
||||
title: |
|
||||
Kubernetes Avancé
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202102-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-02-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- shared/toc.md
|
||||
- #1
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- #2
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/crd.md
|
||||
- #3
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- #4
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/hpa-v2.md
|
||||
- #5
|
||||
- k8s/admission.md
|
||||
- k8s/kyverno.md
|
||||
- #6
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/eck.md
|
||||
- k8s/portworx.md
|
||||
56
slides/5.yml
56
slides/5.yml
@@ -1,56 +0,0 @@
|
||||
title: |
|
||||
Opérer Kubernetes
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202102-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-02-enix.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
-
|
||||
- k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
-
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/interco.md
|
||||
-
|
||||
- k8s/cni-internals.md
|
||||
- k8s/apilb.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
#- k8s/cloud-controller-manager.md
|
||||
-
|
||||
- k8s/control-plane-auth.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- shared/thankyou.md
|
||||
-
|
||||
|
|
||||
# (Extra content)
|
||||
- k8s/apiserver-deepdive.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
@@ -6,10 +6,10 @@
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
#/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
|
||||
/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
|
||||
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
|
||||
#/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=DOCKERALLDAY
|
||||
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=DOCKERALLDAY
|
||||
|
||||
# Shortlink for the QRCode
|
||||
/q /qrcode.html 200
|
||||
@@ -19,4 +19,7 @@
|
||||
/next https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
|
||||
/hi5 https://enix.io/fr/services/formation/online/
|
||||
|
||||
/ /highfive.html 200!
|
||||
# Survey form
|
||||
/please https://docs.google.com/forms/d/e/1FAIpQLSfIYSgrV7tpfBNm1hOaprjnBHgWKn5n-k5vtNXYJkOX1sRxng/viewform
|
||||
|
||||
/ /helm.yml.html 200!
|
||||
|
||||
@@ -329,4 +329,4 @@ This is ideal to debug regressions, do side-by-side comparisons, etc.
|
||||
:EN:- Connecting services together with a *Compose file*
|
||||
|
||||
:FR:- Utiliser Compose pour décrire son environnement
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
|
||||
@@ -742,3 +742,15 @@ class: extra-details
|
||||
* This may be used to access an internal package repository.
|
||||
|
||||
(But try to use a multi-stage build instead, if possible!)
|
||||
|
||||
???
|
||||
|
||||
:EN:Container networking essentials
|
||||
:EN:- The Container Network Model
|
||||
:EN:- Container isolation
|
||||
:EN:- Service discovery
|
||||
|
||||
:FR:Mettre ses conteneurs en réseau
|
||||
:FR:- Le "Container Network Model"
|
||||
:FR:- Isolation des conteneurs
|
||||
:FR:- *Service discovery*
|
||||
|
||||
@@ -229,10 +229,5 @@ containers together without exposing their ports.
|
||||
|
||||
???
|
||||
|
||||
:EN:Connecting containers
|
||||
:EN:- Container networking basics
|
||||
:EN:- Exposing a container
|
||||
|
||||
:FR:Connecter les conteneurs
|
||||
:FR:- Description du modèle réseau des conteneurs
|
||||
:FR:- Exposer un conteneur
|
||||
:EN:- Exposing single containers
|
||||
:FR:- Exposer un conteneur isolé
|
||||
|
||||
@@ -101,5 +101,5 @@ Success!
|
||||
|
||||
???
|
||||
|
||||
:EN:- The build cache
|
||||
:EN:- Leveraging the build cache for faster builds
|
||||
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*
|
||||
|
||||
@@ -434,5 +434,12 @@ services:
|
||||
|
||||
???
|
||||
|
||||
:EN:Optimizing images
|
||||
:EN:- Dockerfile tips, tricks, and best practices
|
||||
:FR:- Bonnes pratiques pour la construction des images
|
||||
:EN:- Reducing build time
|
||||
:EN:- Reducing image size
|
||||
|
||||
:FR:Optimiser ses images
|
||||
:FR:- Bonnes pratiques, trucs et astuces
|
||||
:FR:- Réduire le temps de build
|
||||
:FR:- Réduire la taille des images
|
||||
|
||||
@@ -82,3 +82,12 @@ Use cases:
|
||||
* Those containers can communicate over their `lo` interface.
|
||||
<br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it.)
|
||||
|
||||
???
|
||||
|
||||
:EN:Advanced container networking
|
||||
:EN:- Transparent network access with the "host" driver
|
||||
:EN:- Sharing is caring with the "container" driver
|
||||
|
||||
:FR:Paramétrage réseau avancé
|
||||
:FR:- Accès transparent au réseau avec le mode "host"
|
||||
:FR:- Partage de la pile réseau avec le mode "container"
|
||||
|
||||
@@ -2,11 +2,11 @@ title: |
|
||||
Packaging d'applications
|
||||
et CI/CD pour Kubernetes
|
||||
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-202102-online)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/training-202102-online)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-02-enix.container.training/
|
||||
slides: https://2021-04-dijon.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
@@ -25,11 +25,14 @@ content:
|
||||
#- shared/chat-room-zoom.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- k8s/kustomize.md
|
||||
- shared/sampleapp.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
-
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-dependencies.md
|
||||
- k8s/helm-values-schema-validation.md
|
||||
- k8s/helm-secrets.md
|
||||
-
|
||||
- k8s/cert-manager.md
|
||||
@@ -37,3 +40,4 @@ content:
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/prometheus.md
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
<?xml version="1.0"?>
|
||||
<html>
|
||||
<head>
|
||||
<style>
|
||||
td {
|
||||
background: #ccc;
|
||||
padding: 1em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
<tr>
|
||||
<td>Lundi 8 février 2021</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 9 février 2021</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 10 février 2021</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Docker Intensif</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lundi 15 février 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 16 février 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mercredi 17 février 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Jeudi 18 février 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Fondamentaux Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lundi 22 février 2021</td>
|
||||
<td>
|
||||
<a href="3.yml.html">Packaging d'applications et CI/CD pour Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 23 février 2021</td>
|
||||
<td>
|
||||
<a href="3.yml.html">Packaging d'applications et CI/CD pour Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<td>Mercredi 24 février 2021</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
</tr>
|
||||
<td>Jeudi 25 février 2021</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
</tr>
|
||||
<td>Vendredi 26 février 2021</td>
|
||||
<td>
|
||||
<a href="4.yml.html">Kubernetes Avancé</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Lundi 1er mars 2021</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Mardi 2 mars 2021</td>
|
||||
<td>
|
||||
<a href="5.yml.html">Opérer Kubernetes</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
@@ -20,6 +20,7 @@ content:
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -20,6 +20,7 @@ content:
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -20,6 +20,7 @@ content:
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
104
slides/k8s/access-eks-cluster.md
Normal file
104
slides/k8s/access-eks-cluster.md
Normal file
@@ -0,0 +1,104 @@
|
||||
## Accessing our EKS cluster
|
||||
|
||||
- We also have a shared EKS cluster
|
||||
|
||||
- With individual IAM users
|
||||
|
||||
- Let's connect to this cluster!
|
||||
|
||||
---
|
||||
|
||||
## What we need
|
||||
|
||||
- `kubectl` (obviously!)
|
||||
|
||||
- `aws` CLI (recent-ish version)
|
||||
|
||||
(or `aws` CLI + `aws-iam-authenticator` plugin)
|
||||
|
||||
- AWS API access key and secret access key
|
||||
|
||||
- AWS region
|
||||
|
||||
- EKS cluster name
|
||||
|
||||
---
|
||||
|
||||
## Setting up AWS credentials
|
||||
|
||||
- There are many ways to do this
|
||||
|
||||
- We're going to use environment variables
|
||||
|
||||
- You're welcome to use whatever you like (e.g. AWS profiles)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Set the AWS region, API access key, and secret key:
|
||||
```bash
|
||||
export AWS_DEFAULT_REGION=`us-east-2`
|
||||
export AWS_ACCESS_KEY_ID=`AKI...`
|
||||
export AWS_SECRET_ACCESS_KEY=`xyz123...`
|
||||
```
|
||||
|
||||
- Check that the AWS API recognizes us:
|
||||
```bash
|
||||
aws sts get-caller-identity
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating our kubeconfig file
|
||||
|
||||
- Now we can use the AWS CLI to:
|
||||
|
||||
- obtain the Kubernetes API address
|
||||
|
||||
- register it in our kubeconfig file
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update our kubeconfig file:
|
||||
```bash
|
||||
aws eks update-kubeconfig --name `fancy-clustername-1234`
|
||||
```
|
||||
|
||||
- Run some harmless command:
|
||||
```bash
|
||||
kubectl version
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Our resources
|
||||
|
||||
- We have the following permissions:
|
||||
|
||||
- `view` in the `default` namespace
|
||||
|
||||
- `edit` in the `container-training` namespace
|
||||
|
||||
- `admin` in our personal namespace
|
||||
|
||||
- Our personal namespace is our IAM user name
|
||||
|
||||
(but with dots replaced with dashes)
|
||||
|
||||
- For instance, user `ada.lovelace` has namespace `ada-lovelace`
|
||||
|
||||
---
|
||||
|
||||
## Deploying things
|
||||
|
||||
- Let's deploy DockerCoins in our personal namespace!
|
||||
|
||||
- Expose the Web UI with a `LoadBalancer` service
|
||||
|
||||
???
|
||||
|
||||
:EN:- Working with an EKS cluster
|
||||
:FR:- Travailler avec un cluster EKS
|
||||
@@ -134,3 +134,17 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
:EN:- Securely accessing internal services
|
||||
:FR:- Accès sécurisé aux services internes
|
||||
|
||||
:T: Accessing internal services from our local machine
|
||||
|
||||
:Q: What's the advantage of "kubectl port-forward" compared to a NodePort?
|
||||
:A: It can forward arbitrary protocols
|
||||
:A: It doesn't require Kubernetes API credentials
|
||||
:A: It offers deterministic load balancing (instead of random)
|
||||
:A: ✔️It doesn't expose the service to the public
|
||||
|
||||
:Q: What's the security concept behind "kubectl port-forward"?
|
||||
:A: ✔️We authenticate with the Kubernetes API, and it forwards connections on our behalf
|
||||
:A: It detects our source IP address, and only allows connections coming from it
|
||||
:A: It uses end-to-end mTLS (mutual TLS) to authenticate our connections
|
||||
:A: There is no security (as long as it's running, anyone can connect from anywhere)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Authoring YAML
|
||||
|
||||
- There are various ways to generate YAML with Kubernetes, e.g.:
|
||||
- We have already generated YAML implicitly, with e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
@@ -32,26 +32,63 @@
|
||||
|
||||
---
|
||||
|
||||
## We don't have to start from scratch
|
||||
## Various ways to write YAML
|
||||
|
||||
- Create a resource (e.g. Deployment)
|
||||
- Completely from scratch with our favorite editor
|
||||
|
||||
- Dump its YAML with `kubectl get -o yaml ...`
|
||||
(yeah, right)
|
||||
|
||||
- Edit the YAML
|
||||
- Dump an existing resource with `kubectl get -o yaml ...`
|
||||
|
||||
- Use `kubectl apply -f ...` with the YAML file to:
|
||||
(it is recommended to clean up the result)
|
||||
|
||||
- update the resource (if it's the same kind)
|
||||
- Ask `kubectl` to generate the YAML
|
||||
|
||||
- create a new resource (if it's a different kind)
|
||||
(with a `kubectl create --dry-run -o yaml`)
|
||||
|
||||
- Or: Use The Docs, Luke
|
||||
- Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML from scratch
|
||||
|
||||
- Start with a namespace:
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hello
|
||||
```
|
||||
|
||||
- We can use `kubectl explain` to see resource definitions:
|
||||
```bash
|
||||
kubectl explain -r pod.spec
|
||||
```
|
||||
|
||||
- Not the easiest option!
|
||||
|
||||
---
|
||||
|
||||
## Dump the YAML for an existing resource
|
||||
|
||||
- `kubectl get -o yaml` works!
|
||||
|
||||
- A lot of fields in `metadata` are not necessary
|
||||
|
||||
(`managedFields`, `resourceVersion`, `uid`, `creationTimestamp` ...)
|
||||
|
||||
- Most objects will have a `status` field that is not necessary
|
||||
|
||||
- Default or empty values can also be removed for clarity
|
||||
|
||||
- This can be done manually or with the `kubectl-neat` plugin
|
||||
|
||||
`kubectl get -o yaml ... | kubectl neat`
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run` option
|
||||
@@ -63,14 +100,18 @@
|
||||
kubectl create deployment web --image nginx --dry-run
|
||||
```
|
||||
|
||||
- Optionally clean it up with `kubectl neat`, too
|
||||
|
||||
]
|
||||
|
||||
- We can clean up that YAML even more if we want
|
||||
Note: in recent versions of Kubernetes, we should use `--dry-run=client`
|
||||
|
||||
(for instance, we can remove the `creationTimestamp` and empty dicts)
|
||||
(Or `--dry-run=server`; more on that later!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using `--dry-run` with `kubectl apply`
|
||||
|
||||
- The `--dry-run` option can also be used with `kubectl apply`
|
||||
@@ -87,6 +128,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The limits of `kubectl apply --dry-run`
|
||||
|
||||
.exercise[
|
||||
@@ -112,6 +155,8 @@ The resulting YAML doesn't represent a valid DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
|
||||
@@ -135,6 +180,8 @@ Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
@@ -149,6 +196,8 @@ Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- Kubernetes 1.13 also introduced `kubectl diff`
|
||||
@@ -209,3 +258,8 @@ Note: we don't need to specify `--validate=false` here.
|
||||
- check that it still works!
|
||||
|
||||
- That YAML will be useful later when using e.g. Kustomize or Helm
|
||||
|
||||
???
|
||||
|
||||
:EN:- Techniques to write YAML manifests
|
||||
:FR:- Comment écrire des *manifests* YAML
|
||||
@@ -242,3 +242,5 @@ class: extra-details
|
||||
|
||||
:EN:- Obtaining certificates with cert-manager
|
||||
:FR:- Obtenir des certificats avec cert-manager
|
||||
|
||||
:T: Obtaining TLS certificates with cert-manager
|
||||
|
||||
338
slides/k8s/helm-dependencies.md
Normal file
338
slides/k8s/helm-dependencies.md
Normal file
@@ -0,0 +1,338 @@
|
||||
# Charts using other charts
|
||||
|
||||
- Helm charts can have *dependencies* on other charts
|
||||
|
||||
- These dependencies will help us to share or reuse components
|
||||
|
||||
(so that we write and maintain less manifests, less templates, less code!)
|
||||
|
||||
- As an example, we will use a community chart for Redis
|
||||
|
||||
- This will help people who write charts, and people who use them
|
||||
|
||||
- ... And potentially remove a lot of code! ✌️
|
||||
|
||||
---
|
||||
|
||||
## Redis in DockerCoins
|
||||
|
||||
- In the DockerCoins demo app, we have 5 components:
|
||||
|
||||
- 2 internal webservices
|
||||
- 1 worker
|
||||
- 1 public web UI
|
||||
- 1 Redis data store
|
||||
|
||||
- Every component is running some custom code, except Redis
|
||||
|
||||
- Every component is using a custom image, except Redis
|
||||
|
||||
(which is using the official `redis` image)
|
||||
|
||||
- Could we use a standard chart for Redis?
|
||||
|
||||
- Yes! Dependencies to the rescue!
|
||||
|
||||
---
|
||||
|
||||
## Adding our dependency
|
||||
|
||||
- First, we will add the dependency to the `Chart.yaml` file
|
||||
|
||||
- Then, we will ask Helm to download that dependency
|
||||
|
||||
- We will also *lock* the dependency
|
||||
|
||||
(lock it to a specific version, to ensure reproducibility)
|
||||
|
||||
---
|
||||
|
||||
## Declaring the dependency
|
||||
|
||||
- First, let's edit `Chart.yaml`
|
||||
|
||||
.exercise[
|
||||
|
||||
- In `Chart.yaml`, fill the `dependencies` section:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: 11.0.5
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Where do that `repository` and `version` come from?
|
||||
|
||||
We're assuming here that we did our research,
|
||||
or that our resident Helm expert advised us to
|
||||
use Bitnami's Redis chart.
|
||||
|
||||
---
|
||||
|
||||
## Conditions
|
||||
|
||||
- The `condition` field gives us a way to enable/disable the dependency:
|
||||
```yaml
|
||||
conditions: redis.enabled
|
||||
```
|
||||
|
||||
- Here, we can disable Redis with the Helm flag `--set redis.enabled=false`
|
||||
|
||||
(or set that value in a `values.yaml` file)
|
||||
|
||||
- Of course, this is mostly useful for *optional* dependencies
|
||||
|
||||
(otherwise, the app ends up being broken since it'll miss a component)
|
||||
|
||||
---
|
||||
|
||||
## Lock & Load!
|
||||
|
||||
- After adding the dependency, we ask Helm to pin and download it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Ask Helm:
|
||||
```bash
|
||||
helm dependency update
|
||||
```
|
||||
|
||||
(Or `helm dep up`)
|
||||
|
||||
]
|
||||
|
||||
- This will create `Chart.lock` and fetch the dependency
|
||||
|
||||
---
|
||||
|
||||
## What's `Chart.lock`?
|
||||
|
||||
- This is a common pattern with dependencies
|
||||
|
||||
(see also: `Gemfile.lock`, `package.json.lock`, and many others)
|
||||
|
||||
- This lets us define loose dependencies in `Chart.yaml`
|
||||
|
||||
(e.g. "version 11.whatever, but below 12")
|
||||
|
||||
- But have the exact version used in `Chart.lock`
|
||||
|
||||
- This ensures reproducible deployments
|
||||
|
||||
- `Chart.lock` can (should!) be added to our source tree
|
||||
|
||||
- `Chart.lock` can (should!) regularly be updated
|
||||
|
||||
---
|
||||
|
||||
## Loose dependencies
|
||||
|
||||
- Here is an example of loose version requirement:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: ">=11 <12"
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
- This makes sure that we have the most recent version in the 11.x train
|
||||
|
||||
- ... But without upgrading to version 12.x
|
||||
|
||||
(because it might be incompatible)
|
||||
|
||||
---
|
||||
|
||||
## `build` vs `update`
|
||||
|
||||
- Helm actually offers two commands to manage dependencies:
|
||||
|
||||
`helm dependency build` = fetch dependencies listed in `Chart.lock`
|
||||
|
||||
`helm dependency update` = update `Chart.lock` (and run `build`)
|
||||
|
||||
- When the dependency gets updated, we can/should:
|
||||
|
||||
- `helm dep up` (update `Chart.lock` and fetch new chart)
|
||||
|
||||
- test!
|
||||
|
||||
- if everything is fine, `git add Chart.lock` and commit
|
||||
|
||||
---
|
||||
|
||||
## Where are my dependencies?
|
||||
|
||||
- Dependencies are downloaded to the `charts/` subdirectory
|
||||
|
||||
- When they're downloaded, they stay in compressed format (`.tgz`)
|
||||
|
||||
- Should we commit them to our code repository?
|
||||
|
||||
- Pros:
|
||||
|
||||
- more resilient to internet/mirror failures/decommissioning
|
||||
|
||||
- Cons:
|
||||
|
||||
- can add a lot of weight to the repo if charts are big or change often
|
||||
|
||||
- this can be solved by extra tools like git-lfs
|
||||
|
||||
---
|
||||
|
||||
## Dependency tuning
|
||||
|
||||
- DockerCoins expects the `redis` Service to be named `redis`
|
||||
|
||||
- Our Redis chart uses a different Service name by default
|
||||
|
||||
- Service name is `{{ template "redis.fullname" . }}-master`
|
||||
|
||||
- `redis.fullname` looks like this:
|
||||
```
|
||||
{{- define "redis.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
[...]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
```
|
||||
|
||||
- How do we fix this?
|
||||
|
||||
---
|
||||
|
||||
## Setting dependency variables
|
||||
|
||||
- If we set `fullnameOverride` to `redis`:
|
||||
|
||||
- the `{{ template ... }}` block will output `redis`
|
||||
|
||||
- the Service name will be `redis-master`
|
||||
|
||||
- A parent chart can set values for its dependencies
|
||||
|
||||
- For example, in the parent's `values.yaml`:
|
||||
|
||||
```yaml
|
||||
redis: # Name of the dependency
|
||||
fullnameOverride: redis # Value passed to redis
|
||||
cluster: # Other values passed to redis
|
||||
enabled: false
|
||||
```
|
||||
|
||||
- User can also set variables with `--set=` or with `--values=`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Passing templates
|
||||
|
||||
- We can even pass template `{{ include "template.name" }}`, but warning:
|
||||
|
||||
- need to be evaluated with the `tpl` function, on the child side
|
||||
|
||||
- evaluated in the context of the child, with no access to parent variables
|
||||
|
||||
<!-- FIXME this probably deserves an example, but I can't imagine one right now 😅 -->
|
||||
|
||||
---
|
||||
|
||||
## Getting rid of the `-master`
|
||||
|
||||
- Even if we set that `fullnameOverride`, the Service name will be `redis-master`
|
||||
|
||||
- To remove the `-master` suffix, we need to edit the chart itself
|
||||
|
||||
- To edit the Redis chart, we need to *embed* it in our own chart
|
||||
|
||||
- We need to:
|
||||
|
||||
- decompress the chart
|
||||
|
||||
- adjust `Chart.yaml` accordingly
|
||||
|
||||
---
|
||||
|
||||
## Embedding a dependency
|
||||
|
||||
.exercise[
|
||||
|
||||
- Decompress the chart:
|
||||
```yaml
|
||||
cd charts
|
||||
tar zxf redis-*.tgz
|
||||
cd ..
|
||||
```
|
||||
|
||||
- Edit `Chart.yaml` and update the `dependencies` section:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: '*' # No need to constraint version, from local files
|
||||
```
|
||||
|
||||
- Run `helm dep update`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating the dependency
|
||||
|
||||
- Now we can edit the Service name
|
||||
|
||||
(it should be in `charts/redis/templates/redis-master-svc.yaml`)
|
||||
|
||||
- Then try to deploy the whole chart!
|
||||
|
||||
---
|
||||
|
||||
## Embedding a dependency multiple times
|
||||
|
||||
- What if we need multiple copies of the same subchart?
|
||||
|
||||
(for instance, if we need two completely different Redis servers)
|
||||
|
||||
- We can declare a dependency multiple times, and specify an `alias`:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: '*'
|
||||
alias: querycache
|
||||
- name: redis
|
||||
version: '*'
|
||||
alias: celeryqueue
|
||||
```
|
||||
|
||||
- `.Chart.Name` will be set to the `alias`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Compatibility with Helm 2
|
||||
|
||||
- Chart `apiVersion: v1` is the only version supported by Helm 2
|
||||
|
||||
- Chart v1 is also supported by Helm 3
|
||||
|
||||
- Use v1 if you want to be compatible with Helm 2
|
||||
|
||||
- Instead of `Chart.yaml`, dependencies are defined in `requirements.yaml`
|
||||
|
||||
(and we should commit `requirements.lock` instead of `Chart.lock`)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Depending on other charts
|
||||
:EN:- Charts within charts
|
||||
|
||||
:FR:- Dépendances entre charts
|
||||
:FR:- Un chart peut en cacher un autre
|
||||
@@ -462,3 +462,17 @@ All unspecified values will take the default values defined in the chart.
|
||||
:FR:- Fonctionnement général de Helm
|
||||
:FR:- Installer des composants via Helm
|
||||
:FR:- Helm 2, Helm 3, et le *Helm Hub*
|
||||
|
||||
:T: Getting started with Helm and its concepts
|
||||
|
||||
:Q: Which comparison is the most adequate?
|
||||
:A: Helm is a firewall, charts are access lists
|
||||
:A: ✔️Helm is a package manager, charts are packages
|
||||
:A: Helm is an artefact repository, charts are artefacts
|
||||
:A: Helm is a CI/CD platform, charts are CI/CD pipelines
|
||||
|
||||
:Q: What's required to distribute a Helm chart?
|
||||
:A: A Helm commercial license
|
||||
:A: A Docker registry
|
||||
:A: An account on the Helm Hub
|
||||
:A: ✔️An HTTP server
|
||||
|
||||
191
slides/k8s/helm-values-schema-validation.md
Normal file
191
slides/k8s/helm-values-schema-validation.md
Normal file
@@ -0,0 +1,191 @@
|
||||
# Helm and invalid values
|
||||
|
||||
- A lot of Helm charts let us specify an image tag like this:
|
||||
```bash
|
||||
helm install ... --set image.tag=v1.0
|
||||
```
|
||||
|
||||
- What happens if we make a small mistake, like this:
|
||||
```bash
|
||||
helm install ... --set imagetag=v1.0
|
||||
```
|
||||
|
||||
- Or even, like this:
|
||||
```bash
|
||||
helm install ... --set image=v1.0
|
||||
```
|
||||
|
||||
🤔
|
||||
|
||||
---
|
||||
|
||||
## Making mistakes
|
||||
|
||||
- In the first case:
|
||||
|
||||
- we set `imagetag=v1.0` instead of `image.tag=v1.0`
|
||||
|
||||
- Helm will ignore that value (if it's not used anywhere in templates)
|
||||
|
||||
- the chart is deployed with the default value instead
|
||||
|
||||
- In the second case:
|
||||
|
||||
- we set `image=v1.0` instead of `image.tag=v1.0`
|
||||
|
||||
- `image` will be a string instead of an object
|
||||
|
||||
- Helm will *probably* fail when trying to evaluate `image.tag`
|
||||
|
||||
---
|
||||
|
||||
## Preventing mistakes
|
||||
|
||||
- To prevent the first mistake, we need to tell Helm:
|
||||
|
||||
*"let me know if any additional (unknown) value was set!"*
|
||||
|
||||
- To prevent the second mistake, we need to tell Helm:
|
||||
|
||||
*"`image` should be an object, and `image.tag` should be a string!"*
|
||||
|
||||
- We can do this with *values schema validation*
|
||||
|
||||
---
|
||||
|
||||
## Helm values schema validation
|
||||
|
||||
- We can write a spec representing the possible values accepted by the chart
|
||||
|
||||
- Helm will check the validity of the values before trying to install/upgrade
|
||||
|
||||
- If it finds problems, it will stop immediately
|
||||
|
||||
- The spec uses [JSON Schema](https://json-schema.org/):
|
||||
|
||||
*JSON Schema is a vocabulary that allows you to annotate and validate JSON documents.*
|
||||
|
||||
- JSON Schema is designed for JSON, but can easily work with YAML too
|
||||
|
||||
(or any language with `map|dict|associativearray` and `list|array|sequence|tuple`)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We need to put the JSON Schema spec in a file called `values.schema.json`
|
||||
|
||||
(at the root of our chart; right next to `values.yaml` etc.)
|
||||
|
||||
- The file is optional
|
||||
|
||||
- We don't need to register or declare it in `Chart.yaml` or anywhere
|
||||
|
||||
- Let's write a schema that will verify that ...
|
||||
|
||||
- `image.repository` is an official image (string without slashes or dots)
|
||||
|
||||
- `image.pullPolicy` can only be `Always`, `Never`, `IfNotPresent`
|
||||
|
||||
---
|
||||
|
||||
## `values.schema.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"image": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"repository": {
|
||||
"type": "string",
|
||||
"pattern": "^[a-z0-9-_]+$"
|
||||
},
|
||||
"pullPolicy": {
|
||||
"type": "string",
|
||||
"pattern": "^(Always|Never|IfNotPresent)$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing our schema
|
||||
|
||||
- Let's try to install a couple releases with that schema!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try an invalid `pullPolicy`:
|
||||
```bash
|
||||
helm install broken --set image.pullPolicy=ShallNotPass
|
||||
```
|
||||
|
||||
- Try an invalid value:
|
||||
```bash
|
||||
helm install should-break --set ImAgeTAg=toto
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The first one fails, but the second one still passes ...
|
||||
|
||||
- Why?
|
||||
|
||||
---
|
||||
|
||||
## Bailing out on unknown properties
|
||||
|
||||
- We told Helm what properties (values) were valid
|
||||
|
||||
- We didn't say what to do about additional (unknown) properties!
|
||||
|
||||
- We can fix that with `"additionalProperties": false`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `values.schema.json` to add `"additionalProperties": false`
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
...
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing with unknown properties
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to pass an extra property:
|
||||
```bash
|
||||
helm install should-break --set ImAgeTAg=toto
|
||||
```
|
||||
|
||||
- Try to pass an extra nested property:
|
||||
```bash
|
||||
helm install does-it-work --set image.hello=world
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The first command should break.
|
||||
|
||||
The second will not.
|
||||
|
||||
`"additionalProperties": false` needs to be specified at each level.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Helm schema validation
|
||||
:FR:- Validation de schema Helm
|
||||
@@ -52,7 +52,7 @@
|
||||
|
||||
- There are literally dozens of implementations out there
|
||||
|
||||
(15 are listed in the Kubernetes documentation)
|
||||
(https://github.com/containernetworking/cni/ lists more than 25 plugins)
|
||||
|
||||
- Pods have level 3 (IP) connectivity, but *services* are level 4 (TCP or UDP)
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -22,6 +22,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -20,6 +20,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -21,6 +21,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
@@ -64,9 +65,10 @@ content:
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
#- k8s/dryrun.md
|
||||
#- k8s/authoring-yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/rollout.md
|
||||
|
||||
@@ -23,6 +23,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
@@ -55,6 +56,7 @@ content:
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- - k8s/dashboard.md
|
||||
|
||||
@@ -21,6 +21,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
@@ -65,7 +66,7 @@ content:
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
- k8s/authoring-yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
@@ -75,6 +76,7 @@ content:
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
-
|
||||
|
||||
@@ -21,6 +21,7 @@ content:
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
@@ -64,10 +65,11 @@ content:
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
- k8s/authoring-yaml.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/localkubeconfig.md
|
||||
#- k8s/access-eks-cluster.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/rollout.md
|
||||
|
||||
@@ -1,11 +1,35 @@
|
||||
## Intros
|
||||
|
||||
- Hello! I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
|
||||
- This slide should be customized by the tutorial instructor(s).
|
||||
|
||||
- The training will run from 9:30 to 13:00
|
||||
- Hello! We are:
|
||||
|
||||
- There will be a break at (approximately) 11:00
|
||||
- .emoji[👩🏻🏫] Ann O'Nymous ([@...](https://twitter.com/...), Megacorp Inc)
|
||||
|
||||
- .emoji[👨🏾🎓] Stu Dent ([@...](https://twitter.com/...), University of Wakanda)
|
||||
|
||||
<!-- .dummy[
|
||||
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)
|
||||
|
||||
- .emoji[🚁] Alexandre ([@alexbuisine](https://twitter.com/alexbuisine), Enix SAS)
|
||||
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
|
||||
|
||||
- .emoji[⛵] Jérémy ([@jeremygarrouste](twitter.com/jeremygarrouste), Inpiwee)
|
||||
|
||||
- .emoji[🎧] Romain ([@rdegez](https://twitter.com/rdegez), Enix SAS)
|
||||
|
||||
] -->
|
||||
|
||||
- The workshop will run from ...
|
||||
|
||||
- There will be a lunch break at ...
|
||||
|
||||
(And coffee breaks!)
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
@@ -213,6 +213,7 @@ def processcontent(content, filename):
|
||||
return (content, titles)
|
||||
if os.path.isfile(content):
|
||||
return processcontent(open(content).read(), content)
|
||||
logging.warning("Content spans only one line (it's probably a file name) but no file found: {}".format(content))
|
||||
if isinstance(content, list):
|
||||
subparts = [processcontent(c, filename) for c in content]
|
||||
markdown = "\n---\n".join(c[0] for c in subparts)
|
||||
|
||||
12
slides/shared/chat-room-slack.md
Normal file
12
slides/shared/chat-room-slack.md
Normal file
@@ -0,0 +1,12 @@
|
||||
## Chat room
|
||||
|
||||
- A Slack room has been set up for the duration of the training
|
||||
|
||||
- We'll use it to ask questions, get help, share feedback ...
|
||||
|
||||
(let's keep an eye on it during the training!)
|
||||
|
||||
- Reminder, the room is @@CHAT@@
|
||||
|
||||
- Say hi in the chat room!
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
## Pre-requirements
|
||||
# Pre-requirements
|
||||
|
||||
- Be comfortable with the UNIX command line
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ content:
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -25,6 +25,7 @@ content:
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
@@ -20,6 +20,7 @@ content:
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
|
||||
Reference in New Issue
Block a user