Compare commits
92 Commits
2021-02-en
...
2021-03-lk
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1292168d4e | ||
|
|
220103f2fd | ||
|
|
372eb2e717 | ||
|
|
6185ad6ff3 | ||
|
|
ee9c114da0 | ||
|
|
edf496df13 | ||
|
|
018f06a409 | ||
|
|
c283d7e7d6 | ||
|
|
cd9f1cc645 | ||
|
|
d74a331a05 | ||
|
|
53a3c8a86a | ||
|
|
9c3ab19918 | ||
|
|
a8ecffbaf0 | ||
|
|
e75e4d7f2c | ||
|
|
84c33b9eae | ||
|
|
e606cd2b21 | ||
|
|
d217e52ab5 | ||
|
|
f3c3646298 | ||
|
|
f25bf60d46 | ||
|
|
6ab11ca91c | ||
|
|
a5d857edd4 | ||
|
|
25d6073b17 | ||
|
|
216fefad23 | ||
|
|
f3eb9ce12f | ||
|
|
a484425c81 | ||
|
|
67806fc592 | ||
|
|
cfcf874bac | ||
|
|
858afc846c | ||
|
|
629b4d1037 | ||
|
|
58f2894b54 | ||
|
|
df1db67e53 | ||
|
|
068c81bdcd | ||
|
|
911d78aede | ||
|
|
305674fa3c | ||
|
|
6bdc687cc7 | ||
|
|
49e3a0b75f | ||
|
|
5acb05dfff | ||
|
|
edaef92b35 | ||
|
|
63fccb495f | ||
|
|
055c8a7267 | ||
|
|
f72847bc81 | ||
|
|
4be82f4f57 | ||
|
|
cb760dbe94 | ||
|
|
f306749f68 | ||
|
|
8d20fa4654 | ||
|
|
249d446ef2 | ||
|
|
fe84dec863 | ||
|
|
ce8dc2cdff | ||
|
|
bc33f1f5df | ||
|
|
8597ca1956 | ||
|
|
2300d0719b | ||
|
|
2e6230a9a0 | ||
|
|
ae17c2479c | ||
|
|
23f7e8cff9 | ||
|
|
f72cf16c82 | ||
|
|
6ec8849da1 | ||
|
|
6c11de207a | ||
|
|
2295e4f3de | ||
|
|
18853b2497 | ||
|
|
426957bdca | ||
|
|
6bc08c0a7e | ||
|
|
88d4e5ff54 | ||
|
|
e3e4d04202 | ||
|
|
be6d982e2c | ||
|
|
04bc8a9f60 | ||
|
|
b0dc1c7c3f | ||
|
|
bb1b225026 | ||
|
|
2160aa7f40 | ||
|
|
8f75a4cd7f | ||
|
|
45213a8f2e | ||
|
|
f03aedd024 | ||
|
|
fcfcb127b4 | ||
|
|
5380b2d52a | ||
|
|
cc5da860b9 | ||
|
|
9e9b17f6c9 | ||
|
|
b9ea938157 | ||
|
|
b23aacdce0 | ||
|
|
c3d6e5e660 | ||
|
|
907adf8075 | ||
|
|
dff505ac76 | ||
|
|
df0ffc4d75 | ||
|
|
02278b3748 | ||
|
|
ab959220ba | ||
|
|
b4576e39d0 | ||
|
|
894dafeecb | ||
|
|
366c656d82 | ||
|
|
a60f929232 | ||
|
|
fdc58cafda | ||
|
|
fc170fe4a7 | ||
|
|
8de186b909 | ||
|
|
b816d075d4 | ||
|
|
b1adca025d |
@@ -62,11 +62,8 @@ spec:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- consul
|
||||
matchLabels:
|
||||
app: consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
@@ -88,7 +85,4 @@ spec:
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
command: [ "sh", "-c", "consul leave" ]
|
||||
|
||||
@@ -69,11 +69,8 @@ spec:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- persistentconsul
|
||||
matchLabels:
|
||||
app: consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
@@ -98,7 +95,4 @@ spec:
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
command: [ "sh", "-c", "consul leave" ]
|
||||
|
||||
24
k8s/openebs-pod.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: openebs-local-hostpath-pod
|
||||
spec:
|
||||
volumes:
|
||||
- name: storage
|
||||
persistentVolumeClaim:
|
||||
claimName: local-hostpath-pvc
|
||||
containers:
|
||||
- name: better
|
||||
image: alpine
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
while true; do
|
||||
echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
|
||||
sleep $(($RANDOM % 5 + 20))
|
||||
done
|
||||
volumeMounts:
|
||||
- mountPath: /mnt/storage
|
||||
name: storage
|
||||
|
||||
@@ -3,8 +3,6 @@ apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node2
|
||||
annotations:
|
||||
node: node2
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
@@ -26,8 +24,6 @@ apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node3
|
||||
annotations:
|
||||
node: node3
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
@@ -49,8 +45,6 @@ apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node4
|
||||
annotations:
|
||||
node: node4
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
|
||||
13
prepare-eks/10_create_cluster.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
# Create an EKS cluster.
|
||||
# This is not idempotent (each time you run it, it creates a new cluster).
|
||||
|
||||
eksctl create cluster \
|
||||
--node-type=t3.large \
|
||||
--nodes-max=10 \
|
||||
--alb-ingress-access \
|
||||
--asg-access \
|
||||
--ssh-access \
|
||||
--with-oidc \
|
||||
#
|
||||
|
||||
32
prepare-eks/20_create_users.sh
Executable file
@@ -0,0 +1,32 @@
|
||||
#!/bin/sh
|
||||
# For each user listed in "users.txt", create an IAM user.
|
||||
# Also create AWS API access keys, and store them in "users.keys".
|
||||
# This is idempotent (you can run it multiple times, it will only
|
||||
# create the missing users). However, it will not remove users.
|
||||
# Note that you can remove users from "users.keys" (or even wipe
|
||||
# that file out entirely) and then this script will delete their
|
||||
# keys and generate new keys for them (and add the new keys to
|
||||
# "users.keys".)
|
||||
|
||||
echo "Getting list of existing users ..."
|
||||
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
if ! grep -qw $U users.tmp; then
|
||||
echo "Creating user $U..."
|
||||
aws iam create-user --user-name=$U \
|
||||
--tags=Key=container.training,Value=1
|
||||
fi
|
||||
if ! grep -qw $U users.keys; then
|
||||
echo "Listing keys for user $U..."
|
||||
KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId)
|
||||
for KEY in $KEYS; do
|
||||
echo "Deleting key $KEY for user $U..."
|
||||
aws iam delete-access-key --user=$U --access-key-id=$KEY
|
||||
done
|
||||
echo "Creating access key for user $U..."
|
||||
aws iam create-access-key --user=$U --output json \
|
||||
| jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
|
||||
>> users.keys
|
||||
fi
|
||||
done
|
||||
51
prepare-eks/30_create_or_update_policy.sh
Executable file
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
|
||||
# This is idempotent, which allows to update the policy document below if
|
||||
# you want the users to do other things as well.
|
||||
# Note that each time you run this script, it will actually create a new
|
||||
# version of the policy, set that version as the default version, and
|
||||
# remove all non-default versions. (Because you can only have up to
|
||||
# 5 versions of a given policy, so you need to clean them up.)
|
||||
# After running that script, you will want to attach the policy to our
|
||||
# users (check the other scripts in that directory).
|
||||
|
||||
POLICY_NAME=user.container.training
|
||||
POLICY_DOC='{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"eks:DescribeCluster"
|
||||
],
|
||||
"Resource": "arn:aws:eks:*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}'
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
aws iam create-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--policy-document "$POLICY_DOC" \
|
||||
--set-as-default
|
||||
|
||||
# For reference, the command below creates a policy without versioning:
|
||||
#aws iam create-policy \
|
||||
#--policy-name user.container.training \
|
||||
#--policy-document "$JSON"
|
||||
|
||||
for VERSION in $(
|
||||
aws iam list-policy-versions \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--query 'Versions[?!IsDefaultVersion].VersionId' \
|
||||
--output text)
|
||||
do
|
||||
aws iam delete-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--version-id "$VERSION"
|
||||
done
|
||||
|
||||
# For reference, the command below shows all users using the policy:
|
||||
#aws iam list-entities-for-policy \
|
||||
#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
14
prepare-eks/40_attach_policy.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
# Attach our user policy to all the users defined in "users.txt".
|
||||
# This should be idempotent, because attaching the same policy
|
||||
# to the same user multiple times doesn't do anything.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
POLICY_NAME=user.container.training
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
echo "Attaching policy to user $U ..."
|
||||
aws iam attach-user-policy \
|
||||
--user-name $U \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
done
|
||||
24
prepare-eks/50_aws_auth.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh
|
||||
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
|
||||
# Each user defined in "users.txt" will be mapped to a Kubernetes user
|
||||
# with the same name, and put in the "container.training" group, too.
|
||||
# This is idempotent.
|
||||
# WARNING: this will wipe out the mapUsers component of the aws-auth
|
||||
# ConfigMap, removing all users that aren't in "users.txt".
|
||||
# It won't touch mapRoles, so it shouldn't break the role mappings
|
||||
# put in place by EKS.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
rm -f users.map
|
||||
for U in $(cat users.txt); do
|
||||
echo "\
|
||||
- userarn: arn:aws:iam::$ACCOUNT:user/$U
|
||||
username: $U
|
||||
groups: [ container.training ]\
|
||||
" >> users.map
|
||||
done
|
||||
|
||||
kubectl create --namespace=kube-system configmap aws-auth \
|
||||
--dry-run=client --from-file=mapUsers=users.map -o yaml \
|
||||
| kubectl apply -f-
|
||||
65
prepare-eks/60_setup_rbac_and_ns.sh
Executable file
@@ -0,0 +1,65 @@
|
||||
#!/bin/sh
|
||||
# Create a shared Kubernetes Namespace ("container-training") as well as
|
||||
# individual namespaces for every user in "users.txt", and set up a bunch
|
||||
# of permissions.
|
||||
# Specifically:
|
||||
# - each user gets "view" permissions in the "default" Namespace
|
||||
# - each user gets "edit" permissions in the "container-training" Namespace
|
||||
# - each user gets permissions to list Nodes and Namespaces
|
||||
# - each user gets "admin" permissions in their personal Namespace
|
||||
# Note that since Kubernetes Namespaces can't have dots in their names,
|
||||
# if a user has dots, dots will be mapped to dashes.
|
||||
# So user "ada.lovelace" will get namespace "ada-lovelace".
|
||||
# This is kind of idempotent (but will raise a bunch of errors for objects
|
||||
# that already exist).
|
||||
# TODO: if this needs to evolve, replace all the "create" operations by
|
||||
# "apply" operations. But this is good enough for now.
|
||||
|
||||
kubectl create rolebinding --namespace default container.training \
|
||||
--group=container.training --clusterrole=view
|
||||
|
||||
kubectl create clusterrole view-nodes \
|
||||
--verb=get,list,watch --resource=node
|
||||
kubectl create clusterrolebinding view-nodes \
|
||||
--group=container.training --clusterrole=view-nodes
|
||||
|
||||
kubectl create clusterrole view-namespaces \
|
||||
--verb=get,list,watch --resource=namespace
|
||||
kubectl create clusterrolebinding view-namespaces \
|
||||
--group=container.training --clusterrole=view-namespaces
|
||||
|
||||
kubectl create namespace container-training
|
||||
kubectl create rolebinding --namespace container-training edit \
|
||||
--group=container.training --clusterrole=edit
|
||||
|
||||
# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
|
||||
# instead of running "kubectl" N times, we generate a bunch of YAML and
|
||||
# apply it. It will still generate a lot of API calls but it's much faster
|
||||
# than calling "kubectl" N times. It might be possible to make this even
|
||||
# faster by generating a "kind: List" (I don't know if this would issue
|
||||
# a single API calls or multiple ones; TBD!)
|
||||
for U in $(cat users.txt); do
|
||||
NS=$(echo $U | tr . -)
|
||||
cat <<EOF
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: $NS
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: admin
|
||||
namespace: $NS
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: $U
|
||||
EOF
|
||||
done | kubectl create -f-
|
||||
|
||||
76
prepare-eks/70_oidc.sh
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM role to be used by a Kubernetes ServiceAccount.
|
||||
# The role isn't given any permissions yet (this has to be done by
|
||||
# another script in this series), but a properly configured Pod
|
||||
# should still be able to execute "aws sts get-caller-identity"
|
||||
# and confirm that it's using that role.
|
||||
# This requires the cluster to have an attached OIDC provider.
|
||||
# This should be the case if the cluster has been created with
|
||||
# the scripts in this directory; otherwise, this can be done with
|
||||
# the subsequent command, which is idempotent:
|
||||
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
|
||||
# The policy document used below will authorize all ServiceAccounts
|
||||
# in the "container-training" Namespace to use that role.
|
||||
# This script will also annotate the container-training:default
|
||||
# ServiceAccount so that it can use that role.
|
||||
# This script is not quite idempotent: if you want to use a new
|
||||
# trust policy, some work will be required. (You can delete the role,
|
||||
# but that requires detaching the associated policies. There might also
|
||||
# be a way to update the trust policy directly; we didn't investigate this
|
||||
# further at this point.)
|
||||
|
||||
if [ "$1" ]; then
|
||||
CLUSTER="$1"
|
||||
else
|
||||
echo "Please indicate cluster to use. Available clusters:"
|
||||
aws eks list-clusters --output table
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
TRUST_POLICY=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
|
||||
},
|
||||
"Action": "sts:AssumeRoleWithWebIdentity",
|
||||
"Condition": {
|
||||
"StringLike": {
|
||||
"${OIDC}:sub": ["system:serviceaccount:container-training:*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-role \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--assume-role-policy-document "$TRUST_POLICY"
|
||||
|
||||
kubectl annotate serviceaccounts \
|
||||
--namespace container-training default \
|
||||
"eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
|
||||
--overwrite
|
||||
|
||||
exit
|
||||
|
||||
# Here are commands to delete the role:
|
||||
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
|
||||
aws iam delete-role --role-name $ROLE_NAME
|
||||
|
||||
# Merging the policy with the existing policies:
|
||||
{
|
||||
aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
|
||||
echo "$TRUST_POLICY" | jq -r .Statement[]
|
||||
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
|
||||
aws iam update-assume-role-policy \
|
||||
--role-name $ROLE_NAME \
|
||||
--policy-document file:///tmp/policy.json
|
||||
54
prepare-eks/80_s3_bucket.sh
Executable file
@@ -0,0 +1,54 @@
|
||||
#!/bin/sh
|
||||
# Create an S3 bucket with two objects in it:
|
||||
# - public.txt (world-readable)
|
||||
# - private.txt (private)
|
||||
# Also create an IAM policy granting read-only access to the bucket
|
||||
# (and therefore, to the private object).
|
||||
# Finally, attach the policy to an IAM role (for instance, the role
|
||||
# created by another script in this directory).
|
||||
# This isn't idempotent, but it can be made idempotent by replacing the
|
||||
# "aws iam create-policy" call with "aws iam create-policy-version" and
|
||||
# a bit of extra elbow grease. (See other scripts in this directory for
|
||||
# an example).
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
BUCKET=container.training
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
POLICY_NAME=s3-reader-container-training
|
||||
POLICY_DOC=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:ListBucket",
|
||||
"s3:GetObject*"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::$BUCKET",
|
||||
"arn:aws:s3:::$BUCKET/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-policy \
|
||||
--policy-name $POLICY_NAME \
|
||||
--policy-doc "$POLICY_DOC"
|
||||
|
||||
aws s3 mb s3://container.training
|
||||
|
||||
echo "this is a public object" \
|
||||
| aws s3 cp - s3://container.training/public.txt \
|
||||
--acl public-read
|
||||
|
||||
echo "this is a private object" \
|
||||
| aws s3 cp - s3://container.training/private.txt \
|
||||
--acl private
|
||||
|
||||
aws iam attach-role-policy \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
50
prepare-eks/users.txt
Normal file
@@ -0,0 +1,50 @@
|
||||
ada.lovelace
|
||||
adele.goldstine
|
||||
amanda.jones
|
||||
anita.borg
|
||||
ann.kiessling
|
||||
barbara.mcclintock
|
||||
beatrice.worsley
|
||||
bessie.blount
|
||||
betty.holberton
|
||||
beulah.henry
|
||||
carleen.hutchins
|
||||
caroline.herschel
|
||||
dona.bailey
|
||||
dorothy.hodgkin
|
||||
ellen.ochoa
|
||||
edith.clarke
|
||||
elisha.collier
|
||||
elizabeth.feinler
|
||||
emily.davenport
|
||||
erna.hoover
|
||||
frances.spence
|
||||
gertrude.blanch
|
||||
grace.hopper
|
||||
grete.hermann
|
||||
giuliana.tesoro
|
||||
harriet.tubman
|
||||
hedy.lamarr
|
||||
irma.wyman
|
||||
jane.goodall
|
||||
jean.bartik
|
||||
joy.mangano
|
||||
josephine.cochrane
|
||||
katherine.blodgett
|
||||
kathleen.antonelli
|
||||
lynn.conway
|
||||
margaret.hamilton
|
||||
maria.beasley
|
||||
marie.curie
|
||||
marjorie.joyner
|
||||
marlyn.meltzer
|
||||
mary.kies
|
||||
melitta.bentz
|
||||
milly.koss
|
||||
radia.perlman
|
||||
rosalind.franklin
|
||||
ruth.teitelbaum
|
||||
sarah.mather
|
||||
sophie.wilson
|
||||
stephanie.kwolek
|
||||
yvonne.brill
|
||||
@@ -69,11 +69,14 @@ _cmd_deploy() {
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
# If this VM image is using cloud-init,
|
||||
# wait for cloud-init to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
if [ -d /var/lib/cloud ]; then
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi"
|
||||
|
||||
# Special case for scaleway since it doesn't come with sudo
|
||||
if [ "$INFRACLASS" = "scaleway" ]; then
|
||||
@@ -102,6 +105,12 @@ _cmd_deploy() {
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# If there is no "python" binary, symlink to python3
|
||||
#pssh "
|
||||
#if ! which python; then
|
||||
# ln -s $(which python3) /usr/local/bin/python
|
||||
#fi"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
@@ -208,7 +217,14 @@ _cmd_kube() {
|
||||
echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
|
||||
echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"
|
||||
|
||||
# Initialize kube master
|
||||
# Disable swap
|
||||
# (note that this won't survive across node reboots!)
|
||||
if [ "$INFRACLASS" = "linode" ]; then
|
||||
pssh "
|
||||
sudo swapoff -a"
|
||||
fi
|
||||
|
||||
# Initialize kube control plane
|
||||
pssh --timeout 200 "
|
||||
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token &&
|
||||
@@ -582,7 +598,7 @@ _cmd_start() {
|
||||
case "$1" in
|
||||
--infra) INFRA=$2; shift 2;;
|
||||
--settings) SETTINGS=$2; shift 2;;
|
||||
--count) COUNT=$2; shift 2;;
|
||||
--count) die "Flag --count is deprecated; please use --students instead." ;;
|
||||
--tag) TAG=$2; shift 2;;
|
||||
--students) STUDENTS=$2; shift 2;;
|
||||
*) die "Unrecognized parameter: $1."
|
||||
|
||||
58
prepare-vms/lib/infra/linode.sh
Normal file
@@ -0,0 +1,58 @@
|
||||
if ! command -v linode-cli >/dev/null; then
|
||||
warn "Linode CLI (linode-cli) not found."
|
||||
fi
|
||||
if ! [ -f ~/.config/linode-cli ]; then
|
||||
warn "~/.config/linode-cli not found."
|
||||
fi
|
||||
|
||||
# To view available regions: "linode-cli regions list"
|
||||
LINODE_REGION=${LINODE_REGION-us-west}
|
||||
|
||||
# To view available types: "linode-cli linodes types"
|
||||
LINODE_TYPE=${LINODE_TYPE-g6-standard-2}
|
||||
|
||||
infra_list() {
|
||||
linode-cli linodes list --json |
|
||||
jq -r '.[] | [.id, .label, .status, .type] | @tsv'
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
for I in $(seq 1 $COUNT); do
|
||||
NAME=$(printf "%s-%03d" $TAG $I)
|
||||
sep "Starting instance $I/$COUNT"
|
||||
info " Zone: $LINODE_REGION"
|
||||
info " Name: $NAME"
|
||||
info " Instance type: $LINODE_TYPE"
|
||||
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
|
||||
linode-cli linodes create \
|
||||
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
|
||||
--image=linode/ubuntu18.04 \
|
||||
--authorized_keys="${LINODE_SSHKEY}" \
|
||||
--root_pass="${ROOT_PASS}" \
|
||||
--tags=${TAG} --label=${NAME}
|
||||
done
|
||||
sep
|
||||
|
||||
linode_get_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
info "Counting instances..."
|
||||
linode_get_ids_by_tag $TAG | wc -l
|
||||
info "Deleting instances..."
|
||||
linode_get_ids_by_tag $TAG |
|
||||
xargs -n1 -P10 \
|
||||
linode-cli linodes delete
|
||||
}
|
||||
|
||||
linode_get_ids_by_tag() {
|
||||
TAG=$1
|
||||
linode-cli linodes list --tags $TAG --json | jq -r ".[].id"
|
||||
}
|
||||
|
||||
linode_get_ips_by_tag() {
|
||||
TAG=$1
|
||||
linode-cli linodes list --tags $TAG --json | jq -r ".[].ipv4[0]"
|
||||
}
|
||||
@@ -18,11 +18,11 @@ pssh() {
|
||||
echo "[parallel-ssh] $@"
|
||||
export PSSH=$(which pssh || which parallel-ssh)
|
||||
|
||||
if [ "$INFRACLASS" = hetzner ]; then
|
||||
LOGIN=root
|
||||
else
|
||||
LOGIN=ubuntu
|
||||
fi
|
||||
case "$INFRACLASS" in
|
||||
hetzner) LOGIN=root ;;
|
||||
linode) LOGIN=root ;;
|
||||
*) LOGIN=ubuntu ;;
|
||||
esac
|
||||
|
||||
$PSSH -h $HOSTFILE -l $LOGIN \
|
||||
--par 100 \
|
||||
|
||||
@@ -26,16 +26,16 @@ apiurl = "https://dns.api.gandi.net/api/v5/domains"
|
||||
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
|
||||
|
||||
# Figure out if we're called for a bunch of domains, or just one.
|
||||
first_arg = sys.argv[1]
|
||||
if os.path.isfile(first_arg):
|
||||
domains = open(first_arg).read().split()
|
||||
domain_or_domain_file = sys.argv[1]
|
||||
if os.path.isfile(domain_or_domain_file):
|
||||
domains = open(domain_or_domain_file).read().split()
|
||||
domains = [ d for d in domains if not d.startswith('#') ]
|
||||
tag = sys.argv[2]
|
||||
ips = open(f"tags/{tag}/ips.txt").read().split()
|
||||
settings_file = f"tags/{tag}/settings.yaml"
|
||||
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
|
||||
else:
|
||||
domains = [first_arg]
|
||||
domains = [domain_or_domain_file]
|
||||
ips = sys.argv[2:]
|
||||
clustersize = len(ips)
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /lke.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
@@ -329,4 +329,4 @@ This is ideal to debug regressions, do side-by-side comparisons, etc.
|
||||
:EN:- Connecting services together with a *Compose file*
|
||||
|
||||
:FR:- Utiliser Compose pour décrire son environnement
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
:FR:- Écrire un *Compose file* pour connecter les services entre eux
|
||||
|
||||
@@ -742,3 +742,15 @@ class: extra-details
|
||||
* This may be used to access an internal package repository.
|
||||
|
||||
(But try to use a multi-stage build instead, if possible!)
|
||||
|
||||
???
|
||||
|
||||
:EN:Container networking essentials
|
||||
:EN:- The Container Network Model
|
||||
:EN:- Container isolation
|
||||
:EN:- Service discovery
|
||||
|
||||
:FR:Mettre ses conteneurs en réseau
|
||||
:FR:- Le "Container Network Model"
|
||||
:FR:- Isolation des conteneurs
|
||||
:FR:- *Service discovery*
|
||||
|
||||
@@ -229,10 +229,5 @@ containers together without exposing their ports.
|
||||
|
||||
???
|
||||
|
||||
:EN:Connecting containers
|
||||
:EN:- Container networking basics
|
||||
:EN:- Exposing a container
|
||||
|
||||
:FR:Connecter les conteneurs
|
||||
:FR:- Description du modèle réseau des conteneurs
|
||||
:FR:- Exposer un conteneur
|
||||
:EN:- Exposing single containers
|
||||
:FR:- Exposer un conteneur isolé
|
||||
|
||||
@@ -101,5 +101,5 @@ Success!
|
||||
|
||||
???
|
||||
|
||||
:EN:- The build cache
|
||||
:EN:- Leveraging the build cache for faster builds
|
||||
:FR:- Tirer parti du cache afin d'optimiser la vitesse de *build*
|
||||
|
||||
@@ -434,5 +434,12 @@ services:
|
||||
|
||||
???
|
||||
|
||||
:EN:Optimizing images
|
||||
:EN:- Dockerfile tips, tricks, and best practices
|
||||
:FR:- Bonnes pratiques pour la construction des images
|
||||
:EN:- Reducing build time
|
||||
:EN:- Reducing image size
|
||||
|
||||
:FR:Optimiser ses images
|
||||
:FR:- Bonnes pratiques, trucs et astuces
|
||||
:FR:- Réduire le temps de build
|
||||
:FR:- Réduire la taille des images
|
||||
|
||||
@@ -82,3 +82,12 @@ Use cases:
|
||||
* Those containers can communicate over their `lo` interface.
|
||||
<br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it.)
|
||||
|
||||
???
|
||||
|
||||
:EN:Advanced container networking
|
||||
:EN:- Transparent network access with the "host" driver
|
||||
:EN:- Sharing is caring with the "container" driver
|
||||
|
||||
:FR:Paramétrage réseau avancé
|
||||
:FR:- Accès transparent au réseau avec le mode "host"
|
||||
:FR:- Partage de la pile réseau avece le mode "container"
|
||||
|
||||
3921
slides/images/control-planes/advanced-control-plane-split-events.svg
Normal file
|
After Width: | Height: | Size: 231 KiB |
3596
slides/images/control-planes/advanced-control-plane.svg
Normal file
|
After Width: | Height: | Size: 208 KiB |
1294
slides/images/control-planes/managed-kubernetes.svg
Normal file
|
After Width: | Height: | Size: 71 KiB |
3132
slides/images/control-planes/non-dedicated-stacked-nodes.svg
Normal file
|
After Width: | Height: | Size: 167 KiB |
1611
slides/images/control-planes/single-control-and-workers.svg
Normal file
|
After Width: | Height: | Size: 90 KiB |
914
slides/images/control-planes/single-node-dev.svg
Normal file
@@ -0,0 +1,914 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="1600"
|
||||
height="900"
|
||||
viewBox="0 0 1600 900"
|
||||
version="1.1"
|
||||
id="svg696"
|
||||
sodipodi:docname="single-node-dev.svg"
|
||||
inkscape:version="1.0.2 (e86c870879, 2021-01-15)"
|
||||
enable-background="new">
|
||||
<metadata
|
||||
id="metadata700">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title>how-does-k8s-work</dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<sodipodi:namedview
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1"
|
||||
objecttolerance="10"
|
||||
gridtolerance="10"
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="1920"
|
||||
inkscape:window-height="1080"
|
||||
id="namedview698"
|
||||
showgrid="false"
|
||||
inkscape:zoom="0.64"
|
||||
inkscape:cx="133.80574"
|
||||
inkscape:cy="440.39529"
|
||||
inkscape:window-x="0"
|
||||
inkscape:window-y="1080"
|
||||
inkscape:window-maximized="0"
|
||||
inkscape:current-layer="how-does-k8s-work"
|
||||
units="px"
|
||||
inkscape:snap-object-midpoints="true"
|
||||
inkscape:document-rotation="0" />
|
||||
<title
|
||||
id="title304">how-does-k8s-work</title>
|
||||
<style
|
||||
type="text/css"
|
||||
id="style5"><![CDATA[
|
||||
@font-face {
|
||||
font-family: "Droid Serif";
|
||||
src: url(https://fonts.gstatic.com/s/droidserif/v9/tDbI2oqRg1oM3QBjjcaDkOr9rAU.woff2) format("woff2");
|
||||
}
|
||||
]]></style>
|
||||
<defs
|
||||
id="defs483">
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker4502"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path4500"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker4492"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path4490"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker3758"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path3756" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker3586"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path3584" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2794"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2792" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2634"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2632" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2202"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2200" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2054"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2052" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2781"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2779" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker2657"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path2655" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker2327"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path2325"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker2181"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path2179"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker2026"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
id="path2024"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker1880"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
id="path1878"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker1725"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path1723"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker1613"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path1611"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<linearGradient
|
||||
id="linearGradient15544"
|
||||
osb:paint="solid">
|
||||
<stop
|
||||
style="stop-color:#f7fe9a;stop-opacity:1;"
|
||||
offset="0"
|
||||
id="stop15542" />
|
||||
</linearGradient>
|
||||
<marker
|
||||
inkscape:stockid="TriangleOutS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker15078"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path15076"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker14924"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true">
|
||||
<path
|
||||
id="path14922"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker6635"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleOutS">
|
||||
<path
|
||||
transform="scale(0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path6633" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker6541"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path6539" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:stockid="TriangleInS"
|
||||
orient="auto"
|
||||
refY="0.0"
|
||||
refX="0.0"
|
||||
id="marker6297"
|
||||
style="overflow:visible"
|
||||
inkscape:isstock="true"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
id="path6295"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
transform="scale(-0.2)" />
|
||||
</marker>
|
||||
<marker
|
||||
inkscape:isstock="true"
|
||||
style="overflow:visible"
|
||||
id="marker4353"
|
||||
refX="0.0"
|
||||
refY="0.0"
|
||||
orient="auto"
|
||||
inkscape:stockid="TriangleInS"
|
||||
inkscape:collect="always">
|
||||
<path
|
||||
transform="scale(-0.2)"
|
||||
style="fill-rule:evenodd;stroke:#cccccc;stroke-width:1pt;stroke-opacity:1;fill:#cccccc;fill-opacity:1"
|
||||
d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
|
||||
id="path4351" />
|
||||
</marker>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1">
|
||||
<feOffset
|
||||
dx="0"
|
||||
dy="2"
|
||||
in="SourceAlpha"
|
||||
result="shadowOffsetOuter1"
|
||||
id="feOffset308" />
|
||||
<feGaussianBlur
|
||||
stdDeviation="2"
|
||||
in="shadowOffsetOuter1"
|
||||
result="shadowBlurOuter1"
|
||||
id="feGaussianBlur310" />
|
||||
<feColorMatrix
|
||||
values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"
|
||||
type="matrix"
|
||||
in="shadowBlurOuter1"
|
||||
result="shadowMatrixOuter1"
|
||||
id="feColorMatrix312" />
|
||||
<feMerge
|
||||
id="feMerge318">
|
||||
<feMergeNode
|
||||
in="shadowMatrixOuter1"
|
||||
id="feMergeNode314" />
|
||||
<feMergeNode
|
||||
in="SourceGraphic"
|
||||
id="feMergeNode316" />
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
x="-0.039000001"
|
||||
y="-0.096999995"
|
||||
width="1.077"
|
||||
height="1.181"
|
||||
filterUnits="objectBoundingBox"
|
||||
id="filter-1-3">
|
||||
<feOffset
|
||||
dx="0"
|
||||
dy="2"
|
||||
in="SourceAlpha"
|
||||
result="shadowOffsetOuter1"
|
||||
id="feOffset308-6" />
|
||||
<feGaussianBlur
|
||||
stdDeviation="2"
|
||||
in="shadowOffsetOuter1"
|
||||
result="shadowBlurOuter1"
|
||||
id="feGaussianBlur310-7" />
|
||||
<feColorMatrix
|
||||
values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0"
|
||||
type="matrix"
|
||||
in="shadowBlurOuter1"
|
||||
result="shadowMatrixOuter1"
|
||||
id="feColorMatrix312-5" />
|
||||
<feMerge
|
||||
id="feMerge318-3">
|
||||
<feMergeNode
|
||||
in="shadowMatrixOuter1"
|
||||
id="feMergeNode314-5" />
|
||||
<feMergeNode
|
||||
in="SourceGraphic"
|
||||
id="feMergeNode316-6" />
|
||||
</feMerge>
|
||||
</filter>
|
||||
<filter
|
||||
inkscape:collect="always"
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter1101"
|
||||
x="-0.023413722"
|
||||
width="1.0468274"
|
||||
y="-0.023627247"
|
||||
height="1.0472545">
|
||||
<feGaussianBlur
|
||||
inkscape:collect="always"
|
||||
stdDeviation="6.3996521"
|
||||
id="feGaussianBlur1103" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g
|
||||
id="how-does-k8s-work"
|
||||
style="display:inline;fill:none;fill-rule:evenodd;stroke:none;stroke-width:1"
|
||||
transform="translate(240,90)">
|
||||
<path
|
||||
inkscape:connector-curvature="0"
|
||||
id="path3461"
|
||||
d="m 550.17888,-14.918735 c -5.79916,0.29836 -11.4811,1.76683 -16.7125,4.31926 L 305.41221,100.68854 c -11.95688,5.8319 -20.64156,16.86146 -23.59583,29.96674 l -56.2625,249.981 c -2.62478,11.6363 -0.48906,23.8532 5.92083,33.869 0.7693,1.2119 1.59668,2.3849 2.47917,3.515 l 157.85,200.44354 c 8.27676,10.5066 20.82591,16.6243 34.09583,16.6217 l 253.13751,-0.06 c 13.26496,0.01 25.81322,-6.0964 34.09583,-16.5919 L 870.92472,417.96068 c 8.28119,-10.5119 11.38389,-24.2726 8.42917,-37.384 l -56.35,-249.98098 c -2.95427,-13.10528 -11.63895,-24.13483 -23.59583,-29.96674 L 571.32472,-10.599475 c -6.58031,-3.21076 -13.85136,-4.69595 -21.14584,-4.31926 z"
|
||||
style="display:inline;opacity:1;mix-blend-mode:overlay;vector-effect:none;fill:#4285f4;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2.78722;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter1101)"
|
||||
transform="matrix(1.1552713,0,0,1.1552713,-85.780113,43.857391)"
|
||||
inkscape:label="control plane" />
|
||||
<text
|
||||
id="text3581"
|
||||
y="763.69812"
|
||||
x="502.07855"
|
||||
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:32.6588px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;filter:url(#filter-1)"
|
||||
xml:space="preserve"
|
||||
transform="matrix(1.4029438,0,0,1.4029438,-157.63347,-1100.6682)"
|
||||
inkscape:label="control plane label"><tspan
|
||||
y="763.69812"
|
||||
x="502.07855"
|
||||
id="tspan3579"
|
||||
sodipodi:role="line">SINGLE-NODE CLUSTER (FOR DEVELOPMENT)</tspan></text>
|
||||
<g
|
||||
id="apiserver"
|
||||
transform="translate(-160.72924,-102.29405)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,99.00177,261.15864)"
|
||||
ry="5.617908"
|
||||
y="135.0636"
|
||||
x="427.27243"
|
||||
height="125.52966"
|
||||
width="231.99153"
|
||||
id="rect3668"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
inkscape:label="API server" />
|
||||
<text
|
||||
id="text4504"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
id="tspan4502"
|
||||
sodipodi:role="line">API server</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="controller-manager"
|
||||
transform="translate(-200,22)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,205.00177,315.15864)"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
id="rect4506"
|
||||
width="231.99153"
|
||||
height="125.52966"
|
||||
x="426.37198"
|
||||
y="298.2099"
|
||||
ry="5.617908"
|
||||
inkscape:label="controller manager" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
x="656.36871"
|
||||
y="606.2431"
|
||||
id="text4510"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
id="tspan4508"
|
||||
x="656.36871"
|
||||
y="606.2431">controller</tspan><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
id="tspan4524"
|
||||
sodipodi:role="line"
|
||||
x="656.36871"
|
||||
y="647.06647">manager</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="scheduler"
|
||||
transform="translate(-100,-118)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,-4.99823,167.15864)"
|
||||
ry="5.617908"
|
||||
y="475.73566"
|
||||
x="427.94846"
|
||||
height="125.52966"
|
||||
width="231.99153"
|
||||
id="rect4512"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
inkscape:label="scheduler" />
|
||||
<text
|
||||
id="text4516"
|
||||
y="628.66296"
|
||||
x="447.62476"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
y="628.66296"
|
||||
x="447.62476"
|
||||
id="tspan4514"
|
||||
sodipodi:role="line">scheduler</tspan></text>
|
||||
</g>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
x="560.76428"
|
||||
y="764.29028"
|
||||
id="text1649"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
id="tspan1647"
|
||||
x="560.76428"
|
||||
y="764.29028">VM or container</tspan></text>
|
||||
<text
|
||||
id="text3666"
|
||||
y="281.34979"
|
||||
x="-192.40442"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;line-height:1.25;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.79323"
|
||||
xml:space="preserve"
|
||||
inkscape:label="emojis"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;writing-mode:lr-tb;text-anchor:start;stroke-width:1.79323"
|
||||
y="281.34979"
|
||||
x="-192.40442"
|
||||
id="tspan3664"
|
||||
sodipodi:role="line">👩🏼💻👨🏾💻🤖</tspan></text>
|
||||
<rect
|
||||
transform="matrix(0.74849003,0,0,0.42877044,-44.82304,220.38115)"
|
||||
y="149.33455"
|
||||
x="-217.52838"
|
||||
height="357.51495"
|
||||
width="435.94931"
|
||||
id="rect3662"
|
||||
style="display:inline;vector-effect:none;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1-3)"
|
||||
inkscape:label="terminal" />
|
||||
<text
|
||||
inkscape:label="commands"
|
||||
id="text3656"
|
||||
y="331.70175"
|
||||
x="-189.80005"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:35.8105px;line-height:1.25;font-family:Consolas;-inkscape-font-specification:'Consolas, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;display:inline;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.895262"
|
||||
xml:space="preserve"><tspan
|
||||
style="stroke-width:0.895262"
|
||||
y="331.70175"
|
||||
x="-189.80005"
|
||||
sodipodi:role="line"
|
||||
id="tspan1145">$ kubectl ...</tspan></text>
|
||||
<text
|
||||
inkscape:label="thumbsup"
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;line-height:1.25;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1.79323"
|
||||
x="207.5956"
|
||||
y="359.34979"
|
||||
id="text5150"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan5148"
|
||||
x="207.5956"
|
||||
y="359.34979"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:71.7295px;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;writing-mode:lr-tb;text-anchor:start;stroke-width:1.79323" /></text>
|
||||
<path
|
||||
inkscape:label="arrow kubectl"
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path1135"
|
||||
d="m 198.47677,432.58136 237.89885,-0.39724"
|
||||
style="display:none;vector-effect:none;fill:#cccccc;fill-opacity:1;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker1613);marker-end:url(#marker1725);paint-order:normal" />
|
||||
<path
|
||||
inkscape:label="arrow scheduler"
|
||||
style="display:inline;fill:#cccccc;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker4353);paint-order:normal"
|
||||
d="m 414.32586,398.25642 -3.32843,50.64656"
|
||||
id="path4349"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<path
|
||||
inkscape:label="arrow controller manager"
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path6293"
|
||||
d="m 469.44118,396.89114 20.93237,189.75864"
|
||||
style="display:inline;fill:#cccccc;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker6297);paint-order:normal" />
|
||||
<path
|
||||
inkscape:label="node top"
|
||||
style="vector-effect:none;fill:#cccccc;fill-opacity:1;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker4502);paint-order:normal"
|
||||
d="M 117.18356,331.36155 274.9691,326.32941"
|
||||
id="path4488"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<g
|
||||
id="etcd"
|
||||
transform="translate(1.971505,-80.740088)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,99.00177,181.15864)"
|
||||
style="display:inline;opacity:1;vector-effect:none;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
id="rect4518"
|
||||
width="231.99153"
|
||||
height="125.52966"
|
||||
x="427.27246"
|
||||
y="-4.9364014"
|
||||
ry="5.617908"
|
||||
inkscape:label="etcd" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.834657"
|
||||
x="552.46265"
|
||||
y="161.46683"
|
||||
id="text4522"
|
||||
transform="translate(0,80)"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
id="tspan4520"
|
||||
x="552.46265"
|
||||
y="161.46683">etcd</tspan></text>
|
||||
<g
|
||||
id="g3228"
|
||||
inkscape:label="storage"
|
||||
style="display:inline"
|
||||
transform="matrix(0.24039167,0,0,0.24784672,397.27503,204.48707)">
|
||||
<ellipse
|
||||
cx="374.0946"
|
||||
cy="234.48322"
|
||||
rx="92.65731"
|
||||
ry="25.358843"
|
||||
style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:5;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal"
|
||||
id="ellipse9871" />
|
||||
<path
|
||||
sodipodi:nodetypes="cccc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="rect9873"
|
||||
d="M 281.43729,235.006 V -24.92734 H 466.75191 V 235.006"
|
||||
style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:5;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal" />
|
||||
<ellipse
|
||||
ry="25.358843"
|
||||
rx="92.65731"
|
||||
cy="-24.750473"
|
||||
cx="374.0946"
|
||||
id="path9869"
|
||||
style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:5;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;paint-order:normal" />
|
||||
</g>
|
||||
</g>
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:40px;line-height:1.25;font-family:'Noto Color Emoji';-inkscape-font-specification:'Noto Color Emoji, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none"
|
||||
x="-314.87122"
|
||||
y="76.790283"
|
||||
id="text5696"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan5694"
|
||||
x="-314.87122"
|
||||
y="76.790283" /></text>
|
||||
<g
|
||||
id="g6027"
|
||||
transform="translate(173.26362,-123.65545)"
|
||||
inkscape:label="kubelet">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,99.00177,261.15864)"
|
||||
ry="5.617908"
|
||||
y="135.0636"
|
||||
x="427.27243"
|
||||
height="125.52966"
|
||||
width="231.99153"
|
||||
id="rect6021"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
inkscape:label="rect" />
|
||||
<text
|
||||
id="text6025"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
y="438.31876"
|
||||
x="552.05261"
|
||||
id="tspan6023"
|
||||
sodipodi:role="line">kubelet</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g6159"
|
||||
inkscape:label="pod"
|
||||
style="display:inline"
|
||||
transform="translate(-465.32975,388.44365)">
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
id="path6139" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
id="path6141" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1254.6157,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
id="path6143" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.0298px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.550744"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
id="text6147"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan6145"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
style="stroke-width:0.550744">pod</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g7100"
|
||||
inkscape:label="container engine"
|
||||
transform="translate(780.76442,-206.55137)">
|
||||
<g
|
||||
id="g7089"
|
||||
transform="translate(-657.05924,68.771622)">
|
||||
<rect
|
||||
transform="matrix(0.83465672,0,0,0.83465672,205.00177,315.15864)"
|
||||
style="display:inline;vector-effect:none;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:2.78697;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;filter:url(#filter-1)"
|
||||
id="rect7081"
|
||||
width="231.99153"
|
||||
height="125.52966"
|
||||
x="426.37198"
|
||||
y="298.2099"
|
||||
ry="5.617908"
|
||||
inkscape:label="controller manager" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.834657"
|
||||
x="656.36871"
|
||||
y="606.2431"
|
||||
id="text7087"><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
id="tspan7085"
|
||||
sodipodi:role="line"
|
||||
x="656.36871"
|
||||
y="606.2431">container</tspan><tspan
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:32.6587px;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:center;writing-mode:lr-tb;text-anchor:middle;stroke-width:0.834657"
|
||||
sodipodi:role="line"
|
||||
x="656.36871"
|
||||
y="647.06647"
|
||||
id="tspan7093">engine</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<path
|
||||
sodipodi:nodetypes="cc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path2048"
|
||||
d="m 718.47061,417.59763 2.84701,-46.58932"
|
||||
style="display:inline;fill:#cccccc;fill-rule:evenodd;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker2054);marker-end:url(#marker2202);paint-order:normal"
|
||||
inkscape:label="node top left" />
|
||||
<path
|
||||
inkscape:label="arrow etcd"
|
||||
style="display:inline;fill:#cccccc;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker6635);paint-order:normal"
|
||||
d="m 465.71622,277.64 30.73977,-64.3282"
|
||||
id="path6537"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<path
|
||||
inkscape:label="node top"
|
||||
style="display:inline;vector-effect:none;fill:#cccccc;fill-opacity:1;stroke:#cccccc;stroke-width:20;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:3;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker1880);marker-end:url(#marker2026);paint-order:normal"
|
||||
d="m 505.55141,327.11713 103.53017,-3.03009"
|
||||
id="path8569"
|
||||
inkscape:connector-curvature="0"
|
||||
sodipodi:nodetypes="cc" />
|
||||
<g
|
||||
id="g7154"
|
||||
inkscape:label="pod"
|
||||
style="display:inline"
|
||||
transform="translate(-578.21351,370.84416)">
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
id="path7144" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
id="path7146" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1254.6157,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
id="path7148" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.0298px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.550744"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
id="text7152"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan7150"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
style="stroke-width:0.550744">pod</tspan></text>
|
||||
</g>
|
||||
<g
|
||||
id="g7166"
|
||||
inkscape:label="pod"
|
||||
style="display:inline"
|
||||
transform="translate(-606.70424,238.95491)">
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,188.949 38.7689,-11.2425 38.7688,11.2425 -38.7688,11.24254 z"
|
||||
id="path7156" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1177.078,193.25418 v 41.2523 l 36.1218,20.00898 0.1788,-50.46488 z"
|
||||
id="path7158" />
|
||||
<path
|
||||
inkscape:export-ydpi="376.57999"
|
||||
inkscape:export-xdpi="376.57999"
|
||||
style="fill:#eeeeee;fill-rule:evenodd;stroke:#000000;stroke-width:2.74114;stroke-linecap:square;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
|
||||
inkscape:connector-curvature="0"
|
||||
d="m 1254.6157,193.25418 v 41.2523 l -36.1217,20.00898 -0.1788,-50.46488 z"
|
||||
id="path7160" />
|
||||
<text
|
||||
xml:space="preserve"
|
||||
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:22.0298px;line-height:1.25;font-family:'Droid Serif';-inkscape-font-specification:'Droid Serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.550744"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
id="text7164"><tspan
|
||||
sodipodi:role="line"
|
||||
id="tspan7162"
|
||||
x="1195.4893"
|
||||
y="274.76129"
|
||||
style="stroke-width:0.550744">pod</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 44 KiB |
3940
slides/images/control-planes/stacked-control-plane.svg
Normal file
|
After Width: | Height: | Size: 234 KiB |
@@ -1,69 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
#- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
#- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
#- containers/Start_And_Attach.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Container_Networking_Basics.md
|
||||
#- containers/Network_Drivers.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Multi_Stage_Builds.md
|
||||
#- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Advanced_Dockerfiles.md
|
||||
#- containers/Init_Systems.md
|
||||
#- containers/Application_Configuration.md
|
||||
#- containers/Logging.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Container_Engines.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
#- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,70 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Pods_Anatomy.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,78 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
-
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
-
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
-
|
||||
- containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Start_And_Attach.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Resource_Limits.md
|
||||
- # DAY 2
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
-
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
-
|
||||
- containers/Installing_Docker.md
|
||||
- containers/Container_Engines.md
|
||||
- containers/Init_Systems.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
-
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Orchestration_Overview.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
#-
|
||||
#- containers/Docker_Machine.md
|
||||
#- containers/Ambassadors.md
|
||||
#- containers/Namespaces_Cgroups.md
|
||||
#- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
#- containers/Pods_Anatomy.md
|
||||
#- containers/Ecosystem.md
|
||||
104
slides/k8s/access-eks-cluster.md
Normal file
@@ -0,0 +1,104 @@
|
||||
## Accessing our EKS cluster
|
||||
|
||||
- We also have a shared EKS cluster
|
||||
|
||||
- With individual IAM users
|
||||
|
||||
- Let's connect to this cluster!
|
||||
|
||||
---
|
||||
|
||||
## What we need
|
||||
|
||||
- `kubectl` (obviously!)
|
||||
|
||||
- `aws` CLI (recent-ish version)
|
||||
|
||||
(or `aws` CLI + `aws-iam-authenticator` plugin)
|
||||
|
||||
- AWS API access key and secret access key
|
||||
|
||||
- AWS region
|
||||
|
||||
- EKS cluster name
|
||||
|
||||
---
|
||||
|
||||
## Setting up AWS credentials
|
||||
|
||||
- There are many ways to do this
|
||||
|
||||
- We're going to use environment variables
|
||||
|
||||
- You're welcome to use whatever you like (e.g. AWS profiles)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Set the AWS region, API access key, and secret key:
|
||||
```bash
|
||||
export AWS_DEFAULT_REGION=`us-east-2`
|
||||
export AWS_ACCESS_KEY_ID=`AKI...`
|
||||
export AWS_SECRET_ACCESS_KEY=`xyz123...`
|
||||
```
|
||||
|
||||
- Check that the AWS API recognizes us:
|
||||
```bash
|
||||
aws sts get-caller-identity
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating our kubeconfig file
|
||||
|
||||
- Now we can use the AWS CLI to:
|
||||
|
||||
- obtain the Kubernetes API address
|
||||
|
||||
- register it in our kubeconfig file
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update our kubeconfig file:
|
||||
```bash
|
||||
aws eks update-kubeconfig --name `fancy-clustername-1234`
|
||||
```
|
||||
|
||||
- Run some harmless command:
|
||||
```bash
|
||||
kubectl version
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Our resources
|
||||
|
||||
- We have the following permissions:
|
||||
|
||||
- `view` in the `default` namespace
|
||||
|
||||
- `edit` in the `container-training` namespace
|
||||
|
||||
- `admin` in our personal namespace
|
||||
|
||||
- Our personal namespace is our IAM user name
|
||||
|
||||
(but with dots replaced with dashes)
|
||||
|
||||
- For instance, user `ada.lovelace` has namespace `ada-lovelace`
|
||||
|
||||
---
|
||||
|
||||
## Deploying things
|
||||
|
||||
- Let's deploy DockerCoins in our personal namespace!
|
||||
|
||||
- Expose the Web UI with a `LoadBalancer` service
|
||||
|
||||
???
|
||||
|
||||
:EN:- Working with an EKS cluster
|
||||
:FR:- Travailler avec un cluster EKS
|
||||
@@ -1,41 +1,13 @@
|
||||
# Accessing internal services
|
||||
|
||||
- When we are logged in on a cluster node, we can access internal services
|
||||
|
||||
(by virtue of the Kubernetes network model: all nodes can reach all pods and services)
|
||||
|
||||
- When we are accessing a remote cluster, things are different
|
||||
|
||||
(generally, our local machine won't have access to the cluster's internal subnet)
|
||||
|
||||
- How can we temporarily access a service without exposing it to everyone?
|
||||
|
||||
--
|
||||
|
||||
- `kubectl proxy`: gives us access to the API, which includes a proxy for HTTP resources
|
||||
|
||||
- `kubectl port-forward`: allows forwarding of TCP ports to arbitrary pods, services, ...
|
||||
|
||||
---
|
||||
|
||||
## Suspension of disbelief
|
||||
|
||||
The exercises in this section assume that we have set up `kubectl` on our
|
||||
local machine in order to access a remote cluster.
|
||||
|
||||
We will therefore show how to access services and pods of the remote cluster,
|
||||
from our local machine.
|
||||
|
||||
You can also run these exercises directly on the cluster (if you haven't
|
||||
installed and set up `kubectl` locally).
|
||||
|
||||
Running commands locally will be less useful
|
||||
(since you could access services and pods directly),
|
||||
but keep in mind that these commands will work anywhere as long as you have
|
||||
installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` in theory
|
||||
|
||||
- Running `kubectl proxy` gives us access to the entire Kubernetes API
|
||||
@@ -56,7 +28,7 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
## `kubectl proxy` in practice
|
||||
|
||||
- Let's access the `webui` service through `kubectl proxy`
|
||||
- Let's access the `web` service through `kubectl proxy`
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -65,9 +37,9 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
kubectl proxy &
|
||||
```
|
||||
|
||||
- Access the `webui` service:
|
||||
- Access the `web` service:
|
||||
```bash
|
||||
curl localhost:8001/api/v1/namespaces/default/services/webui/proxy/index.html
|
||||
curl localhost:8001/api/v1/namespaces/default/services/web/proxy/
|
||||
```
|
||||
|
||||
- Terminate the proxy:
|
||||
@@ -99,22 +71,20 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
## `kubectl port-forward` in practice
|
||||
|
||||
- Let's access our remote Redis server
|
||||
- Let's access our remote NGINX server
|
||||
|
||||
.exercise[
|
||||
|
||||
- Forward connections from local port 10000 to remote port 6379:
|
||||
- Forward connections from local port 1234 to remote port 80:
|
||||
```bash
|
||||
kubectl port-forward svc/redis 10000:6379 &
|
||||
kubectl port-forward svc/web 1234:80 &
|
||||
```
|
||||
|
||||
- Connect to the Redis server:
|
||||
- Connect to the NGINX server:
|
||||
```bash
|
||||
telnet localhost 10000
|
||||
curl localhost:1234
|
||||
```
|
||||
|
||||
- Issue a few commands, e.g. `INFO server` then `QUIT`
|
||||
|
||||
<!--
|
||||
```wait Connected to localhost```
|
||||
```keys INFO server```
|
||||
@@ -134,3 +104,17 @@ installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
:EN:- Securely accessing internal services
|
||||
:FR:- Accès sécurisé aux services internes
|
||||
|
||||
:T: Accessing internal services from our local machine
|
||||
|
||||
:Q: What's the advantage of "kubectl port-forward" compared to a NodePort?
|
||||
:A: It can forward arbitrary protocols
|
||||
:A: It doesn't require Kubernetes API credentials
|
||||
:A: It offers deterministic load balancing (instead of random)
|
||||
:A: ✔️It doesn't expose the service to the public
|
||||
|
||||
:Q: What's the security concept behind "kubectl port-forward"?
|
||||
:A: ✔️We authenticate with the Kubernetes API, and it forwards connections on our behalf
|
||||
:A: It detects our source IP address, and only allows connections coming from it
|
||||
:A: It uses end-to-end mTLS (mutual TLS) to authenticate our connections
|
||||
:A: There is no security (as long as it's running, anyone can connect from anywhere)
|
||||
|
||||
@@ -733,17 +733,19 @@ class: extra-details
|
||||
|
||||
## Figuring out who can do what
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
- For auditing purposes, sometimes we want to know who can perform which actions
|
||||
|
||||
- There are a few tools to help us with that
|
||||
- There are a few tools to help us with that, available as `kubectl` plugins:
|
||||
|
||||
- [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
|
||||
- `kubectl who-can` / [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
|
||||
|
||||
- [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
|
||||
- `kubectl access-matrix` / [Rakkess (Review Access)](https://github.com/corneliusweig/rakkess) by Cornelius Weig
|
||||
|
||||
- Both are available as standalone programs, or as plugins for `kubectl`
|
||||
- `kubectl rbac-lookup` / [RBAC Lookup](https://github.com/FairwindsOps/rbac-lookup) by FairwindsOps
|
||||
|
||||
(`kubectl` plugins can be installed and managed with `krew`)
|
||||
- `kubectl` plugins can be installed and managed with `krew`
|
||||
|
||||
- They can also be installed and executed as standalone programs
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Authoring YAML
|
||||
|
||||
- There are various ways to generate YAML with Kubernetes, e.g.:
|
||||
- We have already generated YAML implicitly, with e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
@@ -32,26 +32,63 @@
|
||||
|
||||
---
|
||||
|
||||
## We don't have to start from scratch
|
||||
## Various ways to write YAML
|
||||
|
||||
- Create a resource (e.g. Deployment)
|
||||
- Completely from scratch with our favorite editor
|
||||
|
||||
- Dump its YAML with `kubectl get -o yaml ...`
|
||||
(yeah, right)
|
||||
|
||||
- Edit the YAML
|
||||
- Dump an existing resource with `kubectl get -o yaml ...`
|
||||
|
||||
- Use `kubectl apply -f ...` with the YAML file to:
|
||||
(it is recommended to clean up the result)
|
||||
|
||||
- update the resource (if it's the same kind)
|
||||
- Ask `kubectl` to generate the YAML
|
||||
|
||||
- create a new resource (if it's a different kind)
|
||||
(with a `kubectl create --dry-run -o yaml`)
|
||||
|
||||
- Or: Use The Docs, Luke
|
||||
- Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML from scratch
|
||||
|
||||
- Start with a namespace:
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hello
|
||||
```
|
||||
|
||||
- We can use `kubectl explain` to see resource definitions:
|
||||
```bash
|
||||
kubectl explain -r pod.spec
|
||||
```
|
||||
|
||||
- Not the easiest option!
|
||||
|
||||
---
|
||||
|
||||
## Dump the YAML for an existing resource
|
||||
|
||||
- `kubectl get -o yaml` works!
|
||||
|
||||
- A lot of fields in `metadata` are not necessary
|
||||
|
||||
(`managedFields`, `resourceVersion`, `uid`, `creationTimestamp` ...)
|
||||
|
||||
- Most objects will have a `status` field that is not necessary
|
||||
|
||||
- Default or empty values can also be removed for clarity
|
||||
|
||||
- This can be done manually or with the `kubectl-neat` plugin
|
||||
|
||||
`kubectl get -o yaml ... | kubectl neat`
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run` option
|
||||
@@ -63,14 +100,18 @@
|
||||
kubectl create deployment web --image nginx --dry-run
|
||||
```
|
||||
|
||||
- Optionally clean it up with `kubectl neat`, too
|
||||
|
||||
]
|
||||
|
||||
- We can clean up that YAML even more if we want
|
||||
Note: in recent versions of Kubernetes, we should use `--dry-run=client`
|
||||
|
||||
(for instance, we can remove the `creationTimestamp` and empty dicts)
|
||||
(Or `--dry-run=server`; more on that later!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using `--dry-run` with `kubectl apply`
|
||||
|
||||
- The `--dry-run` option can also be used with `kubectl apply`
|
||||
@@ -87,6 +128,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The limits of `kubectl apply --dry-run`
|
||||
|
||||
.exercise[
|
||||
@@ -112,6 +155,8 @@ The resulting YAML doesn't represent a valid DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
|
||||
@@ -135,6 +180,8 @@ Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
@@ -149,6 +196,8 @@ Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- Kubernetes 1.13 also introduced `kubectl diff`
|
||||
@@ -209,3 +258,8 @@ Note: we don't need to specify `--validate=false` here.
|
||||
- check that it still works!
|
||||
|
||||
- That YAML will be useful later when using e.g. Kustomize or Helm
|
||||
|
||||
???
|
||||
|
||||
:EN:- Techniques to write YAML manifests
|
||||
:FR:- Comment écrire des *manifests* YAML
|
||||
@@ -58,25 +58,20 @@
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the namespace for cert-manager:
|
||||
- Let's install the cert-manager Helm chart with this one-liner:
|
||||
```bash
|
||||
kubectl create ns cert-manager
|
||||
```
|
||||
|
||||
- Add the Jetstack repository:
|
||||
```bash
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
```
|
||||
|
||||
- Install cert-manager:
|
||||
```bash
|
||||
helm install cert-manager jetstack/cert-manager \
|
||||
--namespace cert-manager \
|
||||
--set installCRDs=true
|
||||
helm install cert-manager cert-manager \
|
||||
--repo https://charts.jetstack.io \
|
||||
--create-namespace --namespace cert-manager \
|
||||
--set installCRDs=true
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- If you prefer to install with a single YAML file, that's fine too!
|
||||
|
||||
(see [the documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-regular-manifests) for instructions)
|
||||
|
||||
---
|
||||
|
||||
## ClusterIssuer manifest
|
||||
@@ -89,13 +84,15 @@
|
||||
|
||||
## Creating the ClusterIssuer
|
||||
|
||||
- The manifest shown on the previous slide is in @@LINK[k8s/cm-clusterissuer.yaml]
|
||||
- Download the file @@LINK[k8s/cm-clusterissuer.yaml]
|
||||
|
||||
(or copy-paste from the previous slide)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the ClusterIssuer:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/cm-clusterissuer.yaml
|
||||
kubectl apply cm-clusterissuer.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -118,7 +115,9 @@
|
||||
|
||||
## Creating the Certificate
|
||||
|
||||
- The manifest shown on the previous slide is in @@LINK[k8s/cm-certificate.yaml]
|
||||
- Download the file @@LINK[k8s/cm-certificate.yaml]
|
||||
|
||||
(or copy-paste from the previous slide)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -128,7 +127,7 @@
|
||||
|
||||
- Create the Certificate:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/cm-certificate.yaml
|
||||
kubectl apply -f cm-certificate.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -175,25 +174,14 @@
|
||||
|
||||
---
|
||||
|
||||
## What's missing ?
|
||||
## And then...
|
||||
|
||||
--
|
||||
|
||||
An Ingress Controller! 😅
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install an Ingress Controller:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/traefik-v2.yaml
|
||||
```
|
||||
|
||||
- Wait a little bit, and check that we now have a `kubernetes.io/tls` Secret:
|
||||
- A little bit later, we will have a `kubernetes.io/tls` Secret:
|
||||
```bash
|
||||
kubectl get secrets
|
||||
```
|
||||
|
||||
]
|
||||
- Note that this might take a few minutes, because of the DNS integration!
|
||||
|
||||
---
|
||||
|
||||
@@ -223,22 +211,23 @@ spec:
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Let's Encrypt and nip.io
|
||||
## Automatic TLS Ingress with annotations
|
||||
|
||||
- Let's Encrypt has [rate limits](https://letsencrypt.org/docs/rate-limits/) per domain
|
||||
- It is also possible to annotate Ingress resources for cert-manager
|
||||
|
||||
(the limits only apply to the production environment, not staging)
|
||||
- If we annotate an Ingress resource with `cert-manager.io/cluster-issuer=xxx`:
|
||||
|
||||
- There is a limit of 50 certificates per registered domain
|
||||
- cert-manager will detect that annotation
|
||||
|
||||
- If we try to use the production environment, we will probably hit the limit
|
||||
- it will obtain a certificate using the specified ClusterIssuer (`xxx`)
|
||||
|
||||
- It's fine to use the staging environment for these experiments
|
||||
- it will store the key and certificate in the specified Secret
|
||||
|
||||
(our certs won't validate in a browser, but we can always check
|
||||
the details of the cert to verify that it was issued by Let's Encrypt!)
|
||||
- Note: the Ingress still needs the `tls` section with `secretName` and `hosts`
|
||||
|
||||
???
|
||||
|
||||
:EN:- Obtaining certificates with cert-manager
|
||||
:FR:- Obtenir des certificats avec cert-manager
|
||||
|
||||
:T: Obtaining TLS certificates with cert-manager
|
||||
|
||||
@@ -220,6 +220,41 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## How many nodes should a cluster have?
|
||||
|
||||
@@ -164,154 +164,493 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## Ingress
|
||||
|
||||
- We will assume that we have a domain name pointing to our cluster
|
||||
|
||||
(i.e. with a wildcard record pointing to at least one node of the cluster)
|
||||
|
||||
- We will get traffic in the cluster by leveraging `ExternalIPs` services
|
||||
|
||||
(but it would be easy to use `LoadBalancer` services instead)
|
||||
|
||||
- We will use Traefik as the ingress controller
|
||||
|
||||
(but any other one should work too)
|
||||
|
||||
- We will use cert-manager to obtain certificates with Let's Encrypt
|
||||
|
||||
---
|
||||
|
||||
## Other details
|
||||
## Install GitLab itself
|
||||
|
||||
- We will deploy GitLab with its official Helm chart
|
||||
|
||||
- It will still require a bunch of parameters and customization
|
||||
|
||||
- We also need a Storage Class
|
||||
|
||||
(unless our cluster already has one, of course)
|
||||
|
||||
- We suggest the [Rancher local path provisioner](https://github.com/rancher/local-path-provisioner)
|
||||
- Brace!
|
||||
|
||||
---
|
||||
|
||||
## Setting everything up
|
||||
## Installing the GitLab chart
|
||||
|
||||
1. `git clone https://github.com/jpetazzo/kubecoin`
|
||||
```bash
|
||||
helm repo add gitlab https://charts.gitlab.io/
|
||||
DOMAIN=cloudnative.party
|
||||
ISSUER=letsencrypt-production
|
||||
helm upgrade --install gitlab gitlab/gitlab \
|
||||
--create-namespace --namespace gitlab \
|
||||
--set global.hosts.domain=$DOMAIN \
|
||||
--set certmanager.install=false \
|
||||
--set nginx-ingress.enabled=false \
|
||||
--set global.ingress.class=traefik \
|
||||
--set global.ingress.provider=traefik \
|
||||
--set global.ingress.configureCertmanager=false \
|
||||
--set global.ingress.annotations."cert-manager\.io/cluster-issuer"=$ISSUER \
|
||||
--set gitlab.webservice.ingress.tls.secretName=gitlab-gitlab-tls \
|
||||
--set registry.ingress.tls.secretName=gitlab-registry-tls \
|
||||
--set minio.ingress.tls.secretName=gitlab-minio-tls
|
||||
```
|
||||
|
||||
2. `export EMAIL=xxx@example.com DOMAIN=awesome-kube-ci.io`
|
||||
|
||||
(we need a real email address and a domain pointing to the cluster!)
|
||||
|
||||
3. `. setup-gitlab-on-k8s.rc`
|
||||
|
||||
(this doesn't do anything, but defines a number of helper functions)
|
||||
|
||||
4. Execute each helper function, one after another
|
||||
|
||||
(try `do_[TAB]` to see these functions)
|
||||
😰 Can we talk about all these parameters?
|
||||
|
||||
---
|
||||
|
||||
## Local Storage
|
||||
## Breaking down all these parameters
|
||||
|
||||
`do_1_localstorage`
|
||||
- `certmanager.install=false`
|
||||
|
||||
Applies the YAML directly from Rancher's repository.
|
||||
do not install cert-manager, we already have it
|
||||
|
||||
Annotate the Storage Class so that it becomes the default one.
|
||||
- `nginx-ingress.enabled=false`
|
||||
|
||||
do not install the NGINX ingress controller, we already have Traefik
|
||||
|
||||
- `global.ingress.class=traefik`, `global.ingress.provider=traefik`
|
||||
|
||||
these merely enable creation of Ingress resources
|
||||
|
||||
- `global.ingress.configureCertmanager=false`
|
||||
|
||||
do not create a cert-manager Issuer or ClusterIssuer, we have ours
|
||||
|
||||
---
|
||||
|
||||
## Traefik
|
||||
## More parameters
|
||||
|
||||
`do_2_traefik_with_externalips`
|
||||
- `global.ingress.annotations."cert-manager\.io/cluster-issuer"=$ISSUER`
|
||||
|
||||
Install the official Traefik Helm chart.
|
||||
this annotation tells cert-manager to automatically issue certs
|
||||
|
||||
Instead of a `LoadBalancer` service, use a `ClusterIP` with `ExternalIPs`.
|
||||
- `gitlab.webservice.ingress.tls.secretName=gitlab-gitlab-tls`,
|
||||
<br/>
|
||||
`registry.ingress.tls.secretName=gitlab-registry-tls`,
|
||||
<br/>
|
||||
`minio.ingress.tls.secretName=gitlab-minio-tls`
|
||||
|
||||
Automatically infer the `ExternalIPs` from `kubectl get nodes`.
|
||||
|
||||
Enable TLS.
|
||||
these annotations enable TLS in the Ingress controller
|
||||
|
||||
---
|
||||
|
||||
## cert-manager
|
||||
## Wait for GitLab to come up
|
||||
|
||||
`do_3_certmanager`
|
||||
- Let's watch what's happening in the GitLab namespace:
|
||||
```bash
|
||||
watch kubectl get all --namespace gitlab
|
||||
```
|
||||
|
||||
Install cert-manager using their official YAML.
|
||||
- We want to wait for all the Pods to be "Running" or "Completed"
|
||||
|
||||
Easy-peasy.
|
||||
- This will take a few minutes (10-15 minutes for me)
|
||||
|
||||
- Don't worry if you see Pods crashing and restarting
|
||||
|
||||
(it happens when they are waiting on a dependency which isn't up yet)
|
||||
|
||||
---
|
||||
|
||||
## Certificate issuers
|
||||
## Things that could go wrong
|
||||
|
||||
`do_4_issuers`
|
||||
- Symptom: Pods remain "Pending" or "ContainerCreating" for a while
|
||||
|
||||
Create a couple of `ClusterIssuer` resources for cert-manager.
|
||||
- Investigate these pods (with `kubectl describe pod ...`)
|
||||
|
||||
(One for the staging Let's Encrypt environment, one for production.)
|
||||
- Also look at events:
|
||||
```bash
|
||||
kubectl get events \
|
||||
--field-selector=type=Warning --sort-by=metadata.creationTimestamp
|
||||
```
|
||||
|
||||
Note: this requires to specify a valid `$EMAIL` address!
|
||||
- Make sure your cluster is big enough
|
||||
|
||||
Note: if this fails, wait a bit and try again (cert-manager needs to be up).
|
||||
(I use 3 `g6-standard-4` nodes)
|
||||
|
||||
---
|
||||
|
||||
## GitLab
|
||||
## Log into GitLab
|
||||
|
||||
`do_5_gitlab`
|
||||
- First, let's check that we can connect to GitLab (with TLS):
|
||||
|
||||
Deploy GitLab using their official Helm chart.
|
||||
`https://gitlab.$DOMAIN`
|
||||
|
||||
We pass a lot of parameters to this chart:
|
||||
- the domain name to use
|
||||
- disable GitLab's own ingress and cert-manager
|
||||
- annotate the ingress resources so that cert-manager kicks in
|
||||
- bind the shell service (git over SSH) to port 222 to avoid conflict
|
||||
- use ExternalIPs for that shell service
|
||||
- It's asking us for a login and password!
|
||||
|
||||
Note: on modest cloud instances, it can take 10 minutes for GitLab to come up.
|
||||
|
||||
We can check the status with `kubectl get pods --namespace=gitlab`
|
||||
- The login is `root`, and the password is stored in a Secret:
|
||||
```bash
|
||||
kubectl get secrets --namespace=gitlab gitlab-gitlab-initial-root-password \
|
||||
-o jsonpath={.data.password} | base64 -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Log into GitLab and configure it
|
||||
## Configure GitLab
|
||||
|
||||
`do_6_showlogin`
|
||||
- For simplicity, we're going to use that "root" user
|
||||
|
||||
This will get the GitLab root password (stored in a Secret).
|
||||
(but later, you can create multiple users, teams, etc.)
|
||||
|
||||
Then we need to:
|
||||
- log into GitLab
|
||||
- add our SSH key (top-right user menu → settings, then SSH keys on the left)
|
||||
- create a project (using the + menu next to the search bar on top)
|
||||
- go to project configuration (on the left, settings → CI/CD)
|
||||
- add a `KUBECONFIG` file variable with the content of our `.kube/config` file
|
||||
- go to settings → access tokens to create a read-only registry token
|
||||
- add variables `REGISTRY_USER` and `REGISTRY_PASSWORD` with that token
|
||||
- push our repo (`git remote add gitlab ...` then `git push gitlab ...`)
|
||||
- First, let's add our SSH key
|
||||
|
||||
(top-right user menu → settings, then SSH keys on the left)
|
||||
|
||||
- Then, create a project
|
||||
|
||||
(using the + menu next to the search bar on top)
|
||||
|
||||
- Let's call it `kubecoin`
|
||||
|
||||
(you can change it, but you'll have to adjust Git paths later on)
|
||||
|
||||
---
|
||||
|
||||
## Monitoring progress and troubleshooting
|
||||
## Try to push our repository
|
||||
|
||||
- Click on "CI/CD" in the left bar to view pipelines
|
||||
- This is the repository that we're going to use:
|
||||
|
||||
- If you see a permission issue mentioning `system:serviceaccount:gitlab:...`:
|
||||
https://github.com/jpetazzo/kubecoin
|
||||
|
||||
*make sure you did set `KUBECONFIG` correctly!*
|
||||
- Let's clone that repository locally first:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubecoin
|
||||
```
|
||||
|
||||
- GitLab will create namespaces named `gl-<user>-<project>`
|
||||
- Add our GitLab instance as a remote:
|
||||
```bash
|
||||
git remote add gitlab git@gitlab.$DOMAIN:root/kubecoin.git
|
||||
```
|
||||
|
||||
- At the end of the deployment, the web UI will be available on some unique URL
|
||||
- Try to push:
|
||||
```bash
|
||||
git push -u gitlab
|
||||
```
|
||||
|
||||
(`http://<user>-<project>-<githash>-gitlab.<domain>`)
|
||||
---
|
||||
|
||||
## Connection refused?
|
||||
|
||||
- Normally, we get the following error:
|
||||
|
||||
`port 22: Connection refused`
|
||||
|
||||
- Why? 🤔
|
||||
|
||||
--
|
||||
|
||||
- What does `gitlab.$DOMAIN` point to?
|
||||
|
||||
--
|
||||
|
||||
- Our Ingress Controller! (i.e. Traefik) 💡
|
||||
|
||||
- Our Ingress Controller has nothing to do with port 22
|
||||
|
||||
- So how do we solve this?
|
||||
|
||||
---
|
||||
|
||||
## Routing port 22
|
||||
|
||||
- Whatever is on `gitlab.$DOMAIN` needs to have the following "routing":
|
||||
|
||||
- port 80 → GitLab web service
|
||||
|
||||
- port 443 → GitLab web service, with TLS
|
||||
|
||||
- port 22 → GitLab shell service
|
||||
|
||||
- Currently, Traefik is managing `gitlab.$DOMAIN`
|
||||
|
||||
- We are going to tell Traefik to:
|
||||
|
||||
- accept connections on port 22
|
||||
|
||||
- send them to GitLab
|
||||
|
||||
---
|
||||
|
||||
## TCP routing
|
||||
|
||||
- The technique that we are going to use is specific to Traefik
|
||||
|
||||
- Other Ingress Controllers may or may not have similar features
|
||||
|
||||
- When they have similar features, they will be enabled very differently
|
||||
|
||||
---
|
||||
|
||||
## Telling Traefik to open port 22
|
||||
|
||||
- Let's reconfigure Traefik:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true" \
|
||||
--set "providers.kubernetesIngress.publishedService.enabled=true" \
|
||||
--set "ports.ssh.port=2222" \
|
||||
--set "ports.ssh.exposedPort=22" \
|
||||
--set "ports.ssh.expose=true" \
|
||||
--set "ports.ssh.protocol=TCP"
|
||||
```
|
||||
|
||||
- This creates a new "port" on Traefik, called "ssh", listening on port 22
|
||||
|
||||
- Internally, Traefik listens on port 2222 (for permission reasons)
|
||||
|
||||
- Note: Traefik docs also call these ports "entrypoints"
|
||||
|
||||
(these entrypoints are totally unrelated to the `ENTRYPOINT` in Dockerfiles)
|
||||
|
||||
---
|
||||
|
||||
## Knocking on port 22
|
||||
|
||||
- What happens if we try to connect to that port 22 right now?
|
||||
```bash
|
||||
curl gitlab.$DOMAIN:22
|
||||
```
|
||||
|
||||
- We hit GitLab's web service!
|
||||
|
||||
- We need to tell Traefik what to do with connections to that port 22
|
||||
|
||||
- For that, we will create a "TCP route"
|
||||
|
||||
---
|
||||
|
||||
## Traefik TCP route
|
||||
|
||||
The following custom resource tells Traefik to route the `ssh` port that we
|
||||
created earlier, to the `gitlab-gitlab-shell` service belonging to GitLab.
|
||||
|
||||
```yaml
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: gitlab-shell
|
||||
namespace: gitlab
|
||||
spec:
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
- match: HostSNI(\`*`)
|
||||
services:
|
||||
- name: gitlab-gitlab-shell
|
||||
port: 22
|
||||
```
|
||||
|
||||
The `HostSNI` wildcard is the magic option to define a "default route".
|
||||
|
||||
---
|
||||
|
||||
## Creating the TCP route
|
||||
|
||||
Since our manifest has backticks, we must pay attention to quoting:
|
||||
|
||||
```bash
|
||||
kubectl apply -f- << "EOF"
|
||||
apiVersion: traefik.containo.us/v1alpha1
|
||||
kind: IngressRouteTCP
|
||||
metadata:
|
||||
name: gitlab-shell
|
||||
namespace: gitlab
|
||||
spec:
|
||||
entryPoints:
|
||||
- ssh
|
||||
routes:
|
||||
- match: HostSNI(\`*`)
|
||||
services:
|
||||
- name: gitlab-gitlab-shell
|
||||
port: 22
|
||||
EOF
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Knocking on port 22, again
|
||||
|
||||
- Let's see what happens if we try port 22 now:
|
||||
```bash
|
||||
curl gitlab.$DOMAIN:22
|
||||
```
|
||||
|
||||
- This should tell us something like `Received HTTP/0.9 when not allowed`
|
||||
|
||||
(because we're no longer talking to an HTTP server, but to SSH!)
|
||||
|
||||
- Try with SSH:
|
||||
```bash
|
||||
ssh git@gitlab.$DOMAIN
|
||||
```
|
||||
|
||||
- After accepting the key fingerprint, we should see `Welcome to GitLab, @root!`
|
||||
|
||||
---
|
||||
|
||||
## Pushing again
|
||||
|
||||
- Now we can try to push our repository again:
|
||||
```bash
|
||||
git push -u gitlab
|
||||
```
|
||||
|
||||
- Reload the project page in GitLab
|
||||
|
||||
- We should see our repository!
|
||||
|
||||
---
|
||||
|
||||
## CI/CD
|
||||
|
||||
- Click on the CI/CD tab on the left
|
||||
|
||||
(the one with the shuttle / space rocket icon)
|
||||
|
||||
- Our pipeline was detected...
|
||||
|
||||
- But it failed 😕
|
||||
|
||||
- Let's click on one of the failed jobs
|
||||
|
||||
- This is a permission issue!
|
||||
|
||||
---
|
||||
|
||||
## Fixing permissions
|
||||
|
||||
- GitLab needs to do a few of things in our cluster:
|
||||
|
||||
- create Pods to build our container images with BuildKit
|
||||
|
||||
- create Namespaces to deploy staging and production versions of our app
|
||||
|
||||
- create and update resources in these Namespaces
|
||||
|
||||
- For the time being, we're going to grant broad permissions
|
||||
|
||||
(and we will revisit and discuss what to do later)
|
||||
|
||||
---
|
||||
|
||||
## Granting permissions
|
||||
|
||||
- Let's give `cluster-admin` permissions to the GitLab ServiceAccount:
|
||||
```bash
|
||||
kubectl create clusterrolebinding gitlab \
|
||||
--clusterrole=cluster-admin --serviceaccount=gitlab:default
|
||||
```
|
||||
|
||||
- Then retry the CI/CD pipeline
|
||||
|
||||
- The build steps will now succeed; but the deploy steps will fail
|
||||
|
||||
- We need to set the `REGISTRY_USER` and `REGISTRY_PASSWORD` variables
|
||||
|
||||
- Let's explain what this is about!
|
||||
|
||||
---
|
||||
|
||||
## GitLab container registry access
|
||||
|
||||
- A registry access token is created for the duration of the CI/CD pipeline
|
||||
|
||||
(it is exposed through the `$CI_JOB_TOKEN` environment variable)
|
||||
|
||||
- This token gives access only to a specific repository in the registry
|
||||
|
||||
- It is valid only during the execution of the CI/CD pipeline
|
||||
|
||||
- We can (and we do!) use it to *push* images to the registry
|
||||
|
||||
- We cannot use it to *pull* images when running in staging or production
|
||||
|
||||
(because Kubernetes might need to pull images *after* the token expires)
|
||||
|
||||
- We need to create a separate read-only registry access token
|
||||
|
||||
---
|
||||
|
||||
## Creating the registry access token
|
||||
|
||||
- Let's go to "Settings" (the cog wheel on the left) / "Access Tokens"
|
||||
|
||||
- Create a token with `read_registry` permission
|
||||
|
||||
- Save the token name and the token value
|
||||
|
||||
- Then go to "Settings" / "CI/CD"
|
||||
|
||||
- In the "Variables" section, add two variables:
|
||||
|
||||
- `REGISTRY_USER` → token name
|
||||
- `REGISTRY_PASSWORD` → token value
|
||||
|
||||
- Make sure that they are **not** protected!
|
||||
|
||||
(otherwise, they won't be available in non-default tags and branches)
|
||||
|
||||
---
|
||||
|
||||
## Trying again
|
||||
|
||||
- Go back to the CI/CD pipeline view, and hit "Retry"
|
||||
|
||||
- The deploy stage should now work correctly! 🎉
|
||||
|
||||
---
|
||||
|
||||
## Our CI/CD pipeline
|
||||
|
||||
- Let's have a look at the [.gitlab-ci.yml](https://github.com/jpetazzo/kubecoin/blob/107dac5066087c52747e557babc97e57f42dd71d/.gitlab-ci.yml) file
|
||||
|
||||
- We have multiple *stages*:
|
||||
|
||||
- lint (currently doesn't do much, it's mostly as an example)
|
||||
|
||||
- build (currently uses BuildKit)
|
||||
|
||||
- deploy
|
||||
|
||||
- "Deploy" behaves differently in staging and production
|
||||
|
||||
- Let's investigate that!
|
||||
|
||||
---
|
||||
|
||||
## Staging vs production
|
||||
|
||||
- In our pipeline, "production" means "a tag or branch named `production`"
|
||||
|
||||
(see the `except:` and `only:` sections)
|
||||
|
||||
- Everything else is "staging"
|
||||
|
||||
- In "staging":
|
||||
|
||||
- we build and push images
|
||||
- we create a staging Namespace and deploy a copy of the app there
|
||||
|
||||
- In "production":
|
||||
|
||||
- we do not build anything
|
||||
- we deploy (or update) a copy of the app in the production Namespace
|
||||
|
||||
---
|
||||
|
||||
## Namespace naming
|
||||
|
||||
- GitLab will create Namespaces named `gl-<user>-<project>-<hash>`
|
||||
|
||||
- At the end of the deployment, the web UI will be available at:
|
||||
|
||||
`http://<user>-<project>-<githash>-gitlab.<domain>`
|
||||
|
||||
- The "production" Namespace will be `<user>-<project>`
|
||||
|
||||
- And it will be available on its own domain as well:
|
||||
|
||||
`http://<project>-<githash>-gitlab.<domain>`
|
||||
|
||||
---
|
||||
|
||||
@@ -325,7 +664,7 @@ Then we need to:
|
||||
|
||||
- It will do it *only* if that same git commit was pushed to staging first
|
||||
|
||||
(look in the pipeline configuration file to see how it's done!)
|
||||
(because the "production" pipeline skips the build phase)
|
||||
|
||||
---
|
||||
|
||||
@@ -411,35 +750,15 @@ Then we need to:
|
||||
|
||||
---
|
||||
|
||||
## Pros
|
||||
## Why not use GitLab's Kubernetes integration?
|
||||
|
||||
- GitLab is an amazing, open source, all-in-one platform
|
||||
- "All-in-one" approach
|
||||
|
||||
- Available as hosted, community, or enterprise editions
|
||||
(deploys its own Ingress, cert-manager, Prometheus, and much more)
|
||||
|
||||
- Rich ecosystem, very customizable
|
||||
- I wanted to show you something flexible and customizable instead
|
||||
|
||||
- Can run on Kubernetes, or somewhere else
|
||||
|
||||
---
|
||||
|
||||
## Cons
|
||||
|
||||
- It can be difficult to use components separately
|
||||
|
||||
(e.g. use a different registry, or a different job runner)
|
||||
|
||||
- More than one way to configure it
|
||||
|
||||
(it's not an opinionated platform)
|
||||
|
||||
- Not "Kubernetes-native"
|
||||
|
||||
(for instance, jobs are not Kubernetes jobs)
|
||||
|
||||
- Job latency could be improved
|
||||
|
||||
*Note: most of these drawbacks are the flip side of the "pros" on the previous slide!*
|
||||
- But feel free to explore it now that we have shown the basics!
|
||||
|
||||
???
|
||||
|
||||
|
||||
@@ -40,7 +40,22 @@
|
||||
|
||||
- a `Chart.yaml` file, containing metadata (name, version, description ...)
|
||||
|
||||
- Let's look at a simple chart, `stable/tomcat`
|
||||
- Let's look at a simple chart for a basic demo app
|
||||
|
||||
---
|
||||
|
||||
## Adding the repo
|
||||
|
||||
- If you haven't done it before, you need to add the repo for that chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the repo that holds the chart for the OWASP Juice Shop:
|
||||
```bash
|
||||
helm repo add juice https://charts.securecodebox.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -50,17 +65,17 @@
|
||||
|
||||
.exercise[
|
||||
|
||||
- Download the tarball for `stable/tomcat`:
|
||||
- Download the tarball for `juice/juice-shop`:
|
||||
```bash
|
||||
helm pull stable/tomcat
|
||||
helm pull juice/juice-shop
|
||||
```
|
||||
(This will create a file named `tomcat-X.Y.Z.tgz`.)
|
||||
(This will create a file named `juice-shop-X.Y.Z.tgz`.)
|
||||
|
||||
- Or, download + untar `stable/tomcat`:
|
||||
- Or, download + untar `juice/juice-shop`:
|
||||
```bash
|
||||
helm pull stable/tomcat --untar
|
||||
helm pull juice/juice-shop --untar
|
||||
```
|
||||
(This will create a directory named `tomcat`.)
|
||||
(This will create a directory named `juice-shop`.)
|
||||
|
||||
]
|
||||
|
||||
@@ -68,13 +83,13 @@
|
||||
|
||||
## Looking at the chart's content
|
||||
|
||||
- Let's look at the files and directories in the `tomcat` chart
|
||||
- Let's look at the files and directories in the `juice-shop` chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the tree structure of the chart we just downloaded:
|
||||
```bash
|
||||
tree tomcat
|
||||
tree juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
@@ -93,12 +108,11 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml`
|
||||
|
||||
(using the standard Go template library)
|
||||
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the template file for the tomcat Service resource:
|
||||
- Look at the template file for the Service resource:
|
||||
```bash
|
||||
cat tomcat/templates/appsrv-svc.yaml
|
||||
cat juice-shop/templates/service.yaml
|
||||
```
|
||||
|
||||
]
|
||||
@@ -190,7 +204,7 @@ We see the components mentioned above: `Chart.yaml`, `templates/`, `values.yaml`
|
||||
|
||||
- At the top-level of the chart, it's a good idea to have a README
|
||||
|
||||
- It will be viewable with e.g. `helm show readme stable/tomcat`
|
||||
- It will be viewable with e.g. `helm show readme juice/juice-shop`
|
||||
|
||||
- In the `templates/` directory, we can also have a `NOTES.txt` file
|
||||
|
||||
|
||||
338
slides/k8s/helm-dependencies.md
Normal file
@@ -0,0 +1,338 @@
|
||||
# Charts using other charts
|
||||
|
||||
- Helm charts can have *dependencies* on other charts
|
||||
|
||||
- These dependencies will help us to share or reuse components
|
||||
|
||||
(so that we write and maintain less manifests, less templates, less code!)
|
||||
|
||||
- As an example, we will use a community chart for Redis
|
||||
|
||||
- This will help people who write charts, and people who use them
|
||||
|
||||
- ... And potentially remove a lot of code! ✌️
|
||||
|
||||
---
|
||||
|
||||
## Redis in DockerCoins
|
||||
|
||||
- In the DockerCoins demo app, we have 5 components:
|
||||
|
||||
- 2 internal webservices
|
||||
- 1 worker
|
||||
- 1 public web UI
|
||||
- 1 Redis data store
|
||||
|
||||
- Every component is running some custom code, except Redis
|
||||
|
||||
- Every component is using a custom image, except Redis
|
||||
|
||||
(which is using the official `redis` image)
|
||||
|
||||
- Could we use a standard chart for Redis?
|
||||
|
||||
- Yes! Dependencies to the rescue!
|
||||
|
||||
---
|
||||
|
||||
## Adding our dependency
|
||||
|
||||
- First, we will add the dependency to the `Chart.yaml` file
|
||||
|
||||
- Then, we will ask Helm to download that dependency
|
||||
|
||||
- We will also *lock* the dependency
|
||||
|
||||
(lock it to a specific version, to ensure reproducibility)
|
||||
|
||||
---
|
||||
|
||||
## Declaring the dependency
|
||||
|
||||
- First, let's edit `Chart.yaml`
|
||||
|
||||
.exercise[
|
||||
|
||||
- In `Chart.yaml`, fill the `dependencies` section:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: 11.0.5
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Where do that `repository` and `version` come from?
|
||||
|
||||
We're assuming here that we did our research,
|
||||
or that our resident Helm expert advised us to
|
||||
use Bitnami's Redis chart.
|
||||
|
||||
---
|
||||
|
||||
## Conditions
|
||||
|
||||
- The `condition` field gives us a way to enable/disable the dependency:
|
||||
```yaml
|
||||
conditions: redis.enabled
|
||||
```
|
||||
|
||||
- Here, we can disable Redis with the Helm flag `--set redis.enabled=false`
|
||||
|
||||
(or set that value in a `values.yaml` file)
|
||||
|
||||
- Of course, this is mostly useful for *optional* dependencies
|
||||
|
||||
(otherwise, the app ends up being broken since it'll miss a component)
|
||||
|
||||
---
|
||||
|
||||
## Lock & Load!
|
||||
|
||||
- After adding the dependency, we ask Helm to pin and download it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Ask Helm:
|
||||
```bash
|
||||
helm dependency update
|
||||
```
|
||||
|
||||
(Or `helm dep up`)
|
||||
|
||||
]
|
||||
|
||||
- This will create `Chart.lock` and fetch the dependency
|
||||
|
||||
---
|
||||
|
||||
## What's `Chart.lock`?
|
||||
|
||||
- This is a common pattern with dependencies
|
||||
|
||||
(see also: `Gemfile.lock`, `package-lock.json`, and many others)
|
||||
|
||||
- This lets us define loose dependencies in `Chart.yaml`
|
||||
|
||||
(e.g. "version 11.whatever, but below 12")
|
||||
|
||||
- But have the exact version used in `Chart.lock`
|
||||
|
||||
- This ensures reproducible deployments
|
||||
|
||||
- `Chart.lock` can (should!) be added to our source tree
|
||||
|
||||
- `Chart.lock` can (should!) regularly be updated
|
||||
|
||||
---
|
||||
|
||||
## Loose dependencies
|
||||
|
||||
- Here is an example of loose version requirement:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: ">=11 <12"
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
- This makes sure that we have the most recent version in the 11.x train
|
||||
|
||||
- ... But without upgrading to version 12.x
|
||||
|
||||
(because it might be incompatible)
|
||||
|
||||
---
|
||||
|
||||
## `build` vs `update`
|
||||
|
||||
- Helm actually offers two commands to manage dependencies:
|
||||
|
||||
`helm dependency build` = fetch dependencies listed in `Chart.lock`
|
||||
|
||||
`helm dependency update` = update `Chart.lock` (and run `build`)
|
||||
|
||||
- When the dependency gets updated, we can/should:
|
||||
|
||||
- `helm dep up` (update `Chart.lock` and fetch new chart)
|
||||
|
||||
- test!
|
||||
|
||||
- if everything is fine, `git add Chart.lock` and commit
|
||||
|
||||
---
|
||||
|
||||
## Where are my dependencies?
|
||||
|
||||
- Dependencies are downloaded to the `charts/` subdirectory
|
||||
|
||||
- When they're downloaded, they stay in compressed format (`.tgz`)
|
||||
|
||||
- Should we commit them to our code repository?
|
||||
|
||||
- Pros:
|
||||
|
||||
- more resilient to internet/mirror failures/decommissioning
|
||||
|
||||
- Cons:
|
||||
|
||||
- can add a lot of weight to the repo if charts are big or change often
|
||||
|
||||
- this can be solved by extra tools like git-lfs
|
||||
|
||||
---
|
||||
|
||||
## Dependency tuning
|
||||
|
||||
- DockerCoins expects the `redis` Service to be named `redis`
|
||||
|
||||
- Our Redis chart uses a different Service name by default
|
||||
|
||||
- Service name is `{{ template "redis.fullname" . }}-master`
|
||||
|
||||
- `redis.fullname` looks like this:
|
||||
```
|
||||
{{- define "redis.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
[...]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
```
|
||||
|
||||
- How do we fix this?
|
||||
|
||||
---
|
||||
|
||||
## Setting dependency variables
|
||||
|
||||
- If we set `fullnameOverride` to `redis`:
|
||||
|
||||
- the `{{ template ... }}` block will output `redis`
|
||||
|
||||
- the Service name will be `redis-master`
|
||||
|
||||
- A parent chart can set values for its dependencies
|
||||
|
||||
- For example, in the parent's `values.yaml`:
|
||||
|
||||
```yaml
|
||||
redis: # Name of the dependency
|
||||
fullnameOverride: redis # Value passed to redis
|
||||
cluster: # Other values passed to redis
|
||||
enabled: false
|
||||
```
|
||||
|
||||
- User can also set variables with `--set=` or with `--values=`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Passing templates
|
||||
|
||||
- We can even pass template `{{ include "template.name" }}`, but warning:
|
||||
|
||||
- need to be evaluated with the `tpl` function, on the child side
|
||||
|
||||
- evaluated in the context of the child, with no access to parent variables
|
||||
|
||||
<!-- FIXME this probably deserves an example, but I can't imagine one right now 😅 -->
|
||||
|
||||
---
|
||||
|
||||
## Getting rid of the `-master`
|
||||
|
||||
- Even if we set that `fullnameOverride`, the Service name will be `redis-master`
|
||||
|
||||
- To remove the `-master` suffix, we need to edit the chart itself
|
||||
|
||||
- To edit the Redis chart, we need to *embed* it in our own chart
|
||||
|
||||
- We need to:
|
||||
|
||||
- decompress the chart
|
||||
|
||||
- adjust `Chart.yaml` accordingly
|
||||
|
||||
---
|
||||
|
||||
## Embedding a dependency
|
||||
|
||||
.exercise[
|
||||
|
||||
- Decompress the chart:
|
||||
```yaml
|
||||
cd charts
|
||||
tar zxf redis-*.tgz
|
||||
cd ..
|
||||
```
|
||||
|
||||
- Edit `Chart.yaml` and update the `dependencies` section:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: '*' # No need to constrain version, from local files
|
||||
```
|
||||
|
||||
- Run `helm dep update`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Updating the dependency
|
||||
|
||||
- Now we can edit the Service name
|
||||
|
||||
(it should be in `charts/redis/templates/redis-master-svc.yaml`)
|
||||
|
||||
- Then try to deploy the whole chart!
|
||||
|
||||
---
|
||||
|
||||
## Embedding a dependency multiple times
|
||||
|
||||
- What if we need multiple copies of the same subchart?
|
||||
|
||||
(for instance, if we need two completely different Redis servers)
|
||||
|
||||
- We can declare a dependency multiple times, and specify an `alias`:
|
||||
```yaml
|
||||
dependencies:
|
||||
- name: redis
|
||||
version: '*'
|
||||
alias: querycache
|
||||
- name: redis
|
||||
version: '*'
|
||||
alias: celeryqueue
|
||||
```
|
||||
|
||||
- `.Chart.Name` will be set to the `alias`
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Compatibility with Helm 2
|
||||
|
||||
- Chart `apiVersion: v1` is the only version supported by Helm 2
|
||||
|
||||
- Chart v1 is also supported by Helm 3
|
||||
|
||||
- Use v1 if you want to be compatible with Helm 2
|
||||
|
||||
- Instead of `Chart.yaml`, dependencies are defined in `requirements.yaml`
|
||||
|
||||
(and we should commit `requirements.lock` instead of `Chart.lock`)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Depending on other charts
|
||||
:EN:- Charts within charts
|
||||
|
||||
:FR:- Dépendances entre charts
|
||||
:FR:- Un chart peut en cacher un autre
|
||||
@@ -1,20 +1,84 @@
|
||||
# Managing stacks with Helm
|
||||
|
||||
- We created our first resources with `kubectl run`, `kubectl expose` ...
|
||||
- Helm is a (kind of!) package manager for Kubernetes
|
||||
|
||||
- We have also created resources by loading YAML files with `kubectl apply -f`
|
||||
- We can use it to:
|
||||
|
||||
- For larger stacks, managing thousands of lines of YAML is unreasonable
|
||||
- find existing packages (called "charts") created by other folks
|
||||
|
||||
- These YAML bundles need to be customized with variable parameters
|
||||
- install these packages, configuring them for our particular setup
|
||||
|
||||
(E.g.: number of replicas, image version to use ...)
|
||||
- package our own things (for distribution or for internal use)
|
||||
|
||||
- It would be nice to have an organized, versioned collection of bundles
|
||||
- manage the lifecycle of these installs (rollback to previous version etc.)
|
||||
|
||||
- It would be nice to be able to upgrade/rollback these bundles carefully
|
||||
- It's a "CNCF graduate project", indicating a certain level of maturity
|
||||
|
||||
- [Helm](https://helm.sh/) is an open source project offering all these things!
|
||||
(more on that later)
|
||||
|
||||
---
|
||||
|
||||
## From `kubectl run` to YAML
|
||||
|
||||
- We can create resources with one-line commands
|
||||
|
||||
  (`kubectl run`, `kubectl create deployment`, `kubectl expose`...)
|
||||
|
||||
- We can also create resources by loading YAML files
|
||||
|
||||
(with `kubectl apply -f`, `kubectl create -f`...)
|
||||
|
||||
- There can be multiple resources in a single YAML file
|
||||
|
||||
(making them convenient to deploy entire stacks)
|
||||
|
||||
- However, these YAML bundles often need to be customized
|
||||
|
||||
(e.g.: number of replicas, image version to use, features to enable...)
|
||||
|
||||
---
|
||||
|
||||
## Beyond YAML
|
||||
|
||||
- Very often, after putting together our first `app.yaml`, we end up with:
|
||||
|
||||
- `app-prod.yaml`
|
||||
|
||||
- `app-staging.yaml`
|
||||
|
||||
- `app-dev.yaml`
|
||||
|
||||
- instructions indicating to users "please tweak this and that in the YAML"
|
||||
|
||||
- That's where using something like
|
||||
[CUE](https://github.com/cuelang/cue/blob/v0.3.2/doc/tutorial/kubernetes/README.md),
|
||||
[Kustomize](https://kustomize.io/),
|
||||
or [Helm](https://helm.sh/) can help!
|
||||
|
||||
- Now we can do something like this:
|
||||
```bash
|
||||
helm install app ... --set this.parameter=that.value
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Other features of Helm
|
||||
|
||||
- With Helm, we create "charts"
|
||||
|
||||
- These charts can be used internally or distributed publicly
|
||||
|
||||
- Public charts can be indexed through the [Artifact Hub](https://artifacthub.io/)
|
||||
|
||||
- This gives us a way to find and install other folks' charts
|
||||
|
||||
- Helm also gives us ways to manage the lifecycle of what we install:
|
||||
|
||||
- keep track of what we have installed
|
||||
|
||||
- upgrade versions, change parameters, roll back, uninstall
|
||||
|
||||
- Furthermore, even if it's not "the" standard, it's definitely "a" standard!
|
||||
|
||||
---
|
||||
|
||||
@@ -229,71 +293,95 @@ fine for personal and development clusters.)
|
||||
|
||||
---
|
||||
|
||||
## Managing repositories
|
||||
|
||||
- Let's check what repositories we have, and add the `stable` repo
|
||||
|
||||
(the `stable` repo contains a set of official-ish charts)
|
||||
|
||||
.exercise[
|
||||
|
||||
- List our repos:
|
||||
```bash
|
||||
helm repo list
|
||||
```
|
||||
|
||||
- Add the `stable` repo:
|
||||
```bash
|
||||
helm repo add stable https://charts.helm.sh/stable
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Adding a repo can take a few seconds (it downloads the list of charts from the repo).
|
||||
|
||||
It's OK to add a repo that already exists (it will merely update it).
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Deprecation warning
|
||||
## How to find charts, the old way
|
||||
|
||||
- That "stable" is being deprecated, in favor of a more decentralized approach
|
||||
- Helm 2 came with one pre-configured repo, the "stable" repo
|
||||
|
||||
(each community / company / group / project hosting their own repository)
|
||||
(located at https://charts.helm.sh/stable)
|
||||
|
||||
- We're going to use it here for educational purposes
|
||||
- Helm 3 doesn't have any pre-configured repo
|
||||
|
||||
- But if you're looking for production-grade charts, look elsewhere!
|
||||
- The "stable" repo mentioned above is now being deprecated
|
||||
|
||||
(namely, on the Helm Hub)
|
||||
- The new approach is to have fully decentralized repos
|
||||
|
||||
- Repos can be indexed in the Artifact Hub
|
||||
|
||||
(which supersedes the Helm Hub)
|
||||
|
||||
---
|
||||
|
||||
## Search available charts
|
||||
## How to find charts, the new way
|
||||
|
||||
- We can search available charts with `helm search`
|
||||
- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io)
|
||||
|
||||
- We need to specify where to search (only our repos, or Helm Hub)
|
||||
- Or use `helm search hub ...` from the CLI
|
||||
|
||||
- Let's search for all charts mentioning tomcat!
|
||||
- Let's try to find a Helm chart for something called "OWASP Juice Shop"!
|
||||
|
||||
(it is a famous demo app used in security challenges)
|
||||
|
||||
---
|
||||
|
||||
## Finding charts from the CLI
|
||||
|
||||
- We can use `helm search hub <keyword>`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Search for tomcat in the repo that we added earlier:
|
||||
- Look for the OWASP Juice Shop app:
|
||||
```bash
|
||||
helm search repo tomcat
|
||||
helm search hub owasp juice
|
||||
```
|
||||
|
||||
- Search for tomcat on the Helm Hub:
|
||||
- Since the URLs are truncated, try with the YAML output:
|
||||
```bash
|
||||
helm search hub tomcat
|
||||
helm search hub owasp juice -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server.
|
||||
Then go to → https://artifacthub.io/packages/helm/securecodebox/juice-shop
|
||||
|
||||
---
|
||||
|
||||
## Finding charts on the web
|
||||
|
||||
- We can also use the Artifact Hub search feature
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to https://artifacthub.io/
|
||||
|
||||
- In the search box on top, enter "owasp juice"
|
||||
|
||||
- Click on the "juice-shop" result (not "multi-juicer" or "juicy-ctf")
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Installing the chart
|
||||
|
||||
- Click on the "Install" button, it will show instructions
|
||||
|
||||
.exercise[
|
||||
|
||||
- First, add the repository for that chart:
|
||||
```bash
|
||||
helm repo add juice https://charts.securecodebox.io
|
||||
```
|
||||
|
||||
- Then, install the chart:
|
||||
```bash
|
||||
helm install my-juice-shop juice/juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it is also possible to install directly a chart, with `--repo https://...`
|
||||
|
||||
---
|
||||
|
||||
@@ -301,22 +389,22 @@ class: extra-details
|
||||
|
||||
- "Installing a chart" means creating a *release*
|
||||
|
||||
- We need to name that release
|
||||
- In the previous exemple, the release was named "my-juice-shop"
|
||||
|
||||
(or use the `--generate-name` to get Helm to generate one for us)
|
||||
- We can also use `--generate-name` to ask Helm to generate a name for us
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the tomcat chart that we found earlier:
|
||||
```bash
|
||||
helm install java4ever stable/tomcat
|
||||
```
|
||||
|
||||
- List the releases:
|
||||
```bash
|
||||
helm list
|
||||
```
|
||||
|
||||
- Check that we have a `my-juice-shop-...` Pod up and running:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
@@ -329,13 +417,13 @@ class: extra-details
|
||||
|
||||
- The `helm search` command only takes a search string argument
|
||||
|
||||
(e.g. `helm search tomcat`)
|
||||
(e.g. `helm search juice-shop`)
|
||||
|
||||
- With Helm 2, the name is optional:
|
||||
|
||||
`helm install stable/tomcat` will automatically generate a name
|
||||
`helm install juice/juice-shop` will automatically generate a name
|
||||
|
||||
`helm install --name java4ever stable/tomcat` will specify a name
|
||||
`helm install --name my-juice-shop juice/juice-shop` will specify a name
|
||||
|
||||
---
|
||||
|
||||
@@ -349,12 +437,12 @@ class: extra-details
|
||||
|
||||
- List all the resources created by this release:
|
||||
```bash
|
||||
kubectl get all --selector=release=java4ever
|
||||
kubectl get all --selector=app.kubernetes.io/instance=my-juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: this `release` label wasn't added automatically by Helm.
|
||||
Note: this label wasn't added automatically by Helm.
|
||||
<br/>
|
||||
It is defined in that chart. In other words, not all charts will provide this label.
|
||||
|
||||
@@ -362,11 +450,11 @@ It is defined in that chart. In other words, not all charts will provide this la
|
||||
|
||||
## Configuring a release
|
||||
|
||||
- By default, `stable/tomcat` creates a service of type `LoadBalancer`
|
||||
- By default, `juice/juice-shop` creates a service of type `ClusterIP`
|
||||
|
||||
- We would like to change that to a `NodePort`
|
||||
|
||||
- We could use `kubectl edit service java4ever-tomcat`, but ...
|
||||
- We could use `kubectl edit service my-juice-shop`, but ...
|
||||
|
||||
... our changes would get overwritten next time we update that chart!
|
||||
|
||||
@@ -386,14 +474,14 @@ It is defined in that chart. In other words, not all charts will provide this la
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the README for tomcat:
|
||||
- Look at the README for the app:
|
||||
```bash
|
||||
helm show readme stable/tomcat
|
||||
helm show readme juice/juice-shop
|
||||
```
|
||||
|
||||
- Look at the values and their defaults:
|
||||
```bash
|
||||
helm show values stable/tomcat
|
||||
helm show values juice/juice-shop
|
||||
```
|
||||
|
||||
]
|
||||
@@ -410,18 +498,18 @@ The `readme` may or may not have (accurate) explanations for the values.
|
||||
|
||||
- Values can be set when installing a chart, or when upgrading it
|
||||
|
||||
- We are going to update `java4ever` to change the type of the service
|
||||
- We are going to update `my-juice-shop` to change the type of the service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update `java4ever`:
|
||||
- Update `my-juice-shop`:
|
||||
```bash
|
||||
helm upgrade java4ever stable/tomcat --set service.type=NodePort
|
||||
helm upgrade my-juice-shop juice/juice-shop --set service.type=NodePort
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note that we have to specify the chart that we use (`stable/tomcat`),
|
||||
Note that we have to specify the chart that we use (`juice/juice-shop`),
|
||||
even if we just want to update some values.
|
||||
|
||||
We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values.
|
||||
@@ -430,25 +518,21 @@ All unspecified values will take the default values defined in the chart.
|
||||
|
||||
---
|
||||
|
||||
## Connecting to tomcat
|
||||
## Connecting to the Juice Shop
|
||||
|
||||
- Let's check the tomcat server that we just installed
|
||||
|
||||
- Note: its readiness probe has a 60s delay
|
||||
|
||||
(so it will take 60s after the initial deployment before the service works)
|
||||
- Let's check the app that we just installed
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the node port allocated to the service:
|
||||
```bash
|
||||
kubectl get service java4ever-tomcat
|
||||
PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort})
|
||||
kubectl get service my-juice-shop
|
||||
PORT=$(kubectl get service my-juice-shop -o jsonpath={..nodePort})
|
||||
```
|
||||
|
||||
- Connect to it, checking the demo app on `/sample/`:
|
||||
- Connect to it:
|
||||
```bash
|
||||
curl localhost:$PORT/sample/
|
||||
curl localhost:$PORT/
|
||||
```
|
||||
|
||||
]
|
||||
@@ -462,3 +546,17 @@ All unspecified values will take the default values defined in the chart.
|
||||
:FR:- Fonctionnement général de Helm
|
||||
:FR:- Installer des composants via Helm
|
||||
:FR:- Helm 2, Helm 3, et le *Helm Hub*
|
||||
|
||||
:T: Getting started with Helm and its concepts
|
||||
|
||||
:Q: Which comparison is the most adequate?
|
||||
:A: Helm is a firewall, charts are access lists
|
||||
:A: ✔️Helm is a package manager, charts are packages
|
||||
:A: Helm is an artefact repository, charts are artefacts
|
||||
:A: Helm is a CI/CD platform, charts are CI/CD pipelines
|
||||
|
||||
:Q: What's required to distribute a Helm chart?
|
||||
:A: A Helm commercial license
|
||||
:A: A Docker registry
|
||||
:A: An account on the Helm Hub
|
||||
:A: ✔️An HTTP server
|
||||
|
||||
@@ -12,22 +12,37 @@
|
||||
|
||||
---
|
||||
|
||||
## Adding the repo
|
||||
|
||||
- If you haven't done it before, you need to add the repo for that chart
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the repo that holds the chart for the OWASP Juice Shop:
|
||||
```bash
|
||||
helm repo add juice https://charts.securecodebox.io
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## We need a release
|
||||
|
||||
- We need to install something with Helm
|
||||
|
||||
- Let's use the `stable/tomcat` chart as an example
|
||||
- Let's use the `juice/juice-shop` chart as an example
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install a release called `tomcat` with the chart `stable/tomcat`:
|
||||
- Install a release called `orange` with the chart `juice/juice-shop`:
|
||||
```bash
|
||||
helm upgrade tomcat stable/tomcat --install
|
||||
helm upgrade orange juice/juice-shop --install
|
||||
```
|
||||
|
||||
- Let's upgrade that release, and change a value:
|
||||
```bash
|
||||
helm upgrade tomcat stable/tomcat --set ingress.enabled=true
|
||||
helm upgrade orange juice/juice-shop --set ingress.enabled=true
|
||||
```
|
||||
|
||||
]
|
||||
@@ -42,7 +57,7 @@
|
||||
|
||||
- View the history for that release:
|
||||
```bash
|
||||
helm history tomcat
|
||||
helm history orange
|
||||
```
|
||||
|
||||
]
|
||||
@@ -82,11 +97,11 @@ We should see a number of secrets with TYPE `helm.sh/release.v1`.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Examine the secret corresponding to the second release of `tomcat`:
|
||||
- Examine the secret corresponding to the second release of `orange`:
|
||||
```bash
|
||||
kubectl describe secret sh.helm.release.v1.tomcat.v2
|
||||
kubectl describe secret sh.helm.release.v1.orange.v2
|
||||
```
|
||||
(`v1` is the secret format; `v2` means revision 2 of the `tomcat` release)
|
||||
(`v1` is the secret format; `v2` means revision 2 of the `orange` release)
|
||||
|
||||
]
|
||||
|
||||
@@ -102,7 +117,7 @@ There is a key named `release`.
|
||||
|
||||
- Dump the secret:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release }}'
|
||||
```
|
||||
|
||||
@@ -120,7 +135,7 @@ Secrets are encoded in base64. We need to decode that!
|
||||
|
||||
- Decode the secret:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode }}'
|
||||
```
|
||||
|
||||
@@ -144,7 +159,7 @@ Let's try one more round of decoding!
|
||||
|
||||
- Decode it twice:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}'
|
||||
```
|
||||
|
||||
@@ -164,7 +179,7 @@ Let's try one more round of decoding!
|
||||
|
||||
- Pipe the decoded release through `file -`:
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}' \
|
||||
| file -
|
||||
```
|
||||
@@ -185,7 +200,7 @@ Gzipped data! It can be decoded with `gunzip -c`.
|
||||
|
||||
- Rerun the previous command, but with `| gunzip -c > release-info` :
|
||||
```bash
|
||||
kubectl get secret sh.helm.release.v1.tomcat.v2 \
|
||||
kubectl get secret sh.helm.release.v1.orange.v2 \
|
||||
-o go-template='{{ .data.release | base64decode | base64decode }}' \
|
||||
| gunzip -c > release-info
|
||||
```
|
||||
@@ -211,7 +226,7 @@ If we inspect that JSON (e.g. with `jq keys release-info`), we see:
|
||||
- `config` (contains the values that we've set)
|
||||
- `info` (date of deployment, status messages)
|
||||
- `manifest` (YAML generated from the templates)
|
||||
- `name` (name of the release, so `tomcat`)
|
||||
- `name` (name of the release, so `orange`)
|
||||
- `namespace` (namespace where we deployed the release)
|
||||
- `version` (revision number within that release; starts at 1)
|
||||
|
||||
|
||||
191
slides/k8s/helm-values-schema-validation.md
Normal file
@@ -0,0 +1,191 @@
|
||||
# Helm and invalid values
|
||||
|
||||
- A lot of Helm charts let us specify an image tag like this:
|
||||
```bash
|
||||
helm install ... --set image.tag=v1.0
|
||||
```
|
||||
|
||||
- What happens if we make a small mistake, like this:
|
||||
```bash
|
||||
helm install ... --set imagetag=v1.0
|
||||
```
|
||||
|
||||
- Or even, like this:
|
||||
```bash
|
||||
helm install ... --set image=v1.0
|
||||
```
|
||||
|
||||
🤔
|
||||
|
||||
---
|
||||
|
||||
## Making mistakes
|
||||
|
||||
- In the first case:
|
||||
|
||||
- we set `imagetag=v1.0` instead of `image.tag=v1.0`
|
||||
|
||||
- Helm will ignore that value (if it's not used anywhere in templates)
|
||||
|
||||
- the chart is deployed with the default value instead
|
||||
|
||||
- In the second case:
|
||||
|
||||
- we set `image=v1.0` instead of `image.tag=v1.0`
|
||||
|
||||
- `image` will be a string instead of an object
|
||||
|
||||
- Helm will *probably* fail when trying to evaluate `image.tag`
|
||||
|
||||
---
|
||||
|
||||
## Preventing mistakes
|
||||
|
||||
- To prevent the first mistake, we need to tell Helm:
|
||||
|
||||
  *"let me know if any additional (unknown) value was set!"*
|
||||
|
||||
- To prevent the second mistake, we need to tell Helm:
|
||||
|
||||
*"`image` should be an object, and `image.tag` should be a string!"*
|
||||
|
||||
- We can do this with *values schema validation*
|
||||
|
||||
---
|
||||
|
||||
## Helm values schema validation
|
||||
|
||||
- We can write a spec representing the possible values accepted by the chart
|
||||
|
||||
- Helm will check the validity of the values before trying to install/upgrade
|
||||
|
||||
- If it finds problems, it will stop immediately
|
||||
|
||||
- The spec uses [JSON Schema](https://json-schema.org/):
|
||||
|
||||
*JSON Schema is a vocabulary that allows you to annotate and validate JSON documents.*
|
||||
|
||||
- JSON Schema is designed for JSON, but can easily work with YAML too
|
||||
|
||||
(or any language with `map|dict|associativearray` and `list|array|sequence|tuple`)
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We need to put the JSON Schema spec in a file called `values.schema.json`
|
||||
|
||||
(at the root of our chart; right next to `values.yaml` etc.)
|
||||
|
||||
- The file is optional
|
||||
|
||||
- We don't need to register or declare it in `Chart.yaml` or anywhere
|
||||
|
||||
- Let's write a schema that will verify that ...
|
||||
|
||||
- `image.repository` is an official image (string without slashes or dots)
|
||||
|
||||
- `image.pullPolicy` can only be `Always`, `Never`, `IfNotPresent`
|
||||
|
||||
---
|
||||
|
||||
## `values.schema.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"image": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"repository": {
|
||||
"type": "string",
|
||||
"pattern": "^[a-z0-9-_]+$"
|
||||
},
|
||||
"pullPolicy": {
|
||||
"type": "string",
|
||||
"pattern": "^(Always|Never|IfNotPresent)$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing our schema
|
||||
|
||||
- Let's try to install a couple releases with that schema!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try an invalid `pullPolicy`:
|
||||
```bash
|
||||
helm install broken --set image.pullPolicy=ShallNotPass
|
||||
```
|
||||
|
||||
- Try an invalid value:
|
||||
```bash
|
||||
helm install should-break --set ImAgeTAg=toto
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The first one fails, but the second one still passes ...
|
||||
|
||||
- Why?
|
||||
|
||||
---
|
||||
|
||||
## Bailing out on unknown properties
|
||||
|
||||
- We told Helm what properties (values) were valid
|
||||
|
||||
- We didn't say what to do about additional (unknown) properties!
|
||||
|
||||
- We can fix that with `"additionalProperties": false`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `values.schema.json` to add `"additionalProperties": false`
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/schema#",
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
...
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing with unknown properties
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to pass an extra property:
|
||||
```bash
|
||||
helm install should-break --set ImAgeTAg=toto
|
||||
```
|
||||
|
||||
- Try to pass an extra nested property:
|
||||
```bash
|
||||
helm install does-it-work --set image.hello=world
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The first command should break.
|
||||
|
||||
The second will not.
|
||||
|
||||
`"additionalProperties": false` needs to be specified at each level.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Helm schema validation
|
||||
:FR:- Validation de schema Helm
|
||||
@@ -52,7 +52,7 @@
|
||||
|
||||
- There are literally dozens of implementations out there
|
||||
|
||||
(15 are listed in the Kubernetes documentation)
|
||||
(https://github.com/containernetworking/cni/ lists more than 25 plugins)
|
||||
|
||||
- Pods have level 3 (IP) connectivity, but *services* are level 4 (TCP or UDP)
|
||||
|
||||
|
||||
521
slides/k8s/openebs.md
Normal file
@@ -0,0 +1,521 @@
|
||||
# OpenEBS
|
||||
|
||||
- [OpenEBS] is a popular open-source storage solution for Kubernetes
|
||||
|
||||
- Uses the concept of "Container Attached Storage"
|
||||
|
||||
(1 volume = 1 dedicated controller pod + a set of replica pods)
|
||||
|
||||
- Supports a wide range of storage engines:
|
||||
|
||||
- LocalPV: local volumes (hostpath or device), no replication
|
||||
|
||||
- Jiva: for lighter workloads with basic cloning/snapshotting
|
||||
|
||||
- cStor: more powerful engine that also supports resizing, RAID, disk pools ...
|
||||
|
||||
- [Mayastor]: newer, even more powerful engine with NVMe and vhost-user support
|
||||
|
||||
[OpenEBS]: https://openebs.io/
|
||||
|
||||
[Mayastor]: https://github.com/openebs/MayaStor#mayastor
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What are all these storage engines?
|
||||
|
||||
- LocalPV is great if we want good performance, no replication, easy setup
|
||||
|
||||
(it is similar to the Rancher local path provisioner)
|
||||
|
||||
- Jiva is great if we want replication and easy setup
|
||||
|
||||
(data is stored in containers' filesystems)
|
||||
|
||||
- cStor is more powerful and flexible, but requires more extensive setup
|
||||
|
||||
- Mayastor is designed to achieve extreme performance levels
|
||||
|
||||
(with the right hardware and disks)
|
||||
|
||||
- The OpenEBS documentation has a [good comparison of engines] to help us pick
|
||||
|
||||
[good comparison of engines]: https://docs.openebs.io/docs/next/casengines.html#cstor-vs-jiva-vs-localpv-features-comparison
|
||||
|
||||
---
|
||||
|
||||
## Installing OpenEBS with Helm
|
||||
|
||||
- The OpenEBS control plane can be installed with Helm
|
||||
|
||||
- It will run as a set of containers on Kubernetes worker nodes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install OpenEBS:
|
||||
```bash
|
||||
helm upgrade --install openebs openebs \
|
||||
--repo https://openebs.github.io/charts \
|
||||
--namespace openebs --create-namespace
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what was installed
|
||||
|
||||
- Wait a little bit ...
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the pods in the `openebs` namespace:
|
||||
```bash
|
||||
kubectl get pods --namespace openebs
|
||||
```
|
||||
|
||||
- And the StorageClasses that were created:
|
||||
```bash
|
||||
kubectl get sc
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## The default StorageClasses
|
||||
|
||||
- OpenEBS typically creates three default StorageClasses
|
||||
|
||||
- `openebs-jiva-default` provisions 3 replicated Jiva pods per volume
|
||||
|
||||
- data is stored in `/openebs` in the replica pods
|
||||
- `/openebs` is a localpath volume mapped to `/var/openebs/pvc-...` on the node
|
||||
|
||||
- `openebs-hostpath` uses LocalPV with local directories
|
||||
|
||||
- volumes are hostpath volumes created in `/var/openebs/local` on each node
|
||||
|
||||
- `openebs-device` uses LocalPV with local block devices
|
||||
|
||||
- requires available disks and/or a bit of extra configuration
|
||||
- the default configuration filters out loop, LVM, MD devices
|
||||
|
||||
---
|
||||
|
||||
## When do we need custom StorageClasses?
|
||||
|
||||
- To store LocalPV hostpath volumes on a different path on the host
|
||||
|
||||
- To change the number of replicated Jiva pods
|
||||
|
||||
- To use a different Jiva pool
|
||||
|
||||
(i.e. a different path on the host to store the Jiva volumes)
|
||||
|
||||
- To create a cStor pool
|
||||
|
||||
- ...
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Defining a custom StorageClass
|
||||
|
||||
Example for a LocalPV hostpath class using an extra mount on `/mnt/vol001`:
|
||||
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: localpv-hostpath-mntvol001
|
||||
annotations:
|
||||
openebs.io/cas-type: local
|
||||
cas.openebs.io/config: |
|
||||
- name: BasePath
|
||||
value: "/mnt/vol001"
|
||||
- name: StorageType
|
||||
value: "hostpath"
|
||||
provisioner: openebs.io/local
|
||||
```
|
||||
|
||||
- `provisioner` needs to be set accordingly
|
||||
- Storage engine is chosen by specifying the annotation `openebs.io/cas-type`
|
||||
- Storage engine configuration is set with the annotation `cas.openebs.io/config`
|
||||
|
||||
---
|
||||
|
||||
## Checking the default hostpath StorageClass
|
||||
|
||||
- Let's inspect the StorageClass that OpenEBS created for us
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's look at the OpenEBS LocalPV hostpath StorageClass:
|
||||
```bash
|
||||
kubectl get storageclass openebs-hostpath -o yaml
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Create a host path PVC
|
||||
|
||||
- Let's create a Persistent Volume Claim using an explicit StorageClass
|
||||
|
||||
.exercise[
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: local-hostpath-pvc
|
||||
spec:
|
||||
storageClassName: openebs-hostpath
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1G
|
||||
EOF
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Making sure that a PV was created for our PVC
|
||||
|
||||
- Normally, the `openebs-hostpath` StorageClass created a PV for our PVC
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the PV and PVC:
|
||||
```bash
|
||||
kubectl get pv,pvc
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Create a Pod to consume the PV
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a Pod using that PVC:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/openebs-pod.yaml
|
||||
```
|
||||
|
||||
- Here are the sections that declare and use the volume:
|
||||
```yaml
|
||||
volumes:
|
||||
- name: my-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: local-hostpath-pvc
|
||||
containers:
|
||||
...
|
||||
volumeMounts:
|
||||
- mountPath: /mnt/storage
|
||||
name: my-storage
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verify that data is written on the node
|
||||
|
||||
- Let's find the file written by the Pod on the node where the Pod is running
|
||||
|
||||
.exercise[
|
||||
|
||||
- Get the worker node where the pod is located
|
||||
```bash
|
||||
kubectl get pod openebs-local-hostpath-pod -ojsonpath={.spec.nodeName}
|
||||
```
|
||||
|
||||
- SSH into the node
|
||||
|
||||
- Check the volume content
|
||||
```bash
|
||||
sudo tail /var/openebs/local/pvc-*/greet.txt
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Heads up!
|
||||
|
||||
- The following labs and exercises will use the Jiva storage class
|
||||
|
||||
- This storage class creates 3 replicas by default
|
||||
|
||||
- It uses *anti-affinity* placement constraints to put these replicas on different nodes
|
||||
|
||||
- **This requires a cluster with multiple nodes!**
|
||||
|
||||
- It also requires the iSCSI client (aka *initiator*) to be installed on the nodes
|
||||
|
||||
- On many platforms, the iSCSI client is preinstalled and will start automatically
|
||||
|
||||
- If it doesn't, you might want to check [this documentation page] for details
|
||||
|
||||
[this documentation page]: https://docs.openebs.io/docs/next/prerequisites.html
|
||||
|
||||
---
|
||||
|
||||
## The default StorageClass
|
||||
|
||||
- The PVC that we defined earlier specified an explicit StorageClass
|
||||
|
||||
- We can also set a default StorageClass
|
||||
|
||||
- It will then be used for all PVCs that *don't* specify an explicit StorageClass
|
||||
|
||||
- This is done with the annotation `storageclass.kubernetes.io/is-default-class`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check if we have a default StorageClass:
|
||||
```bash
|
||||
kubectl get storageclasses
|
||||
```
|
||||
]
|
||||
|
||||
- The default StorageClass (if there is one) is shown with `(default)`
|
||||
|
||||
---
|
||||
|
||||
## Setting a default StorageClass
|
||||
|
||||
- Let's set the default StorageClass to use `openebs-jiva-default`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Remove the annotation (just in case we already have a default class):
|
||||
```bash
|
||||
kubectl annotate storageclass storageclass.kubernetes.io/is-default-class- --all
|
||||
```
|
||||
|
||||
- Annotate the Jiva StorageClass:
|
||||
```bash
|
||||
kubectl annotate storageclasses \
|
||||
openebs-jiva-default storageclass.kubernetes.io/is-default-class=true
|
||||
```
|
||||
|
||||
- Check the result:
|
||||
```bash
|
||||
    kubectl get storageclasses
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Creating a Pod using the Jiva class
|
||||
|
||||
- We will create a Pod running PostgreSQL, using the default class
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the Pod:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/postgres.yaml
|
||||
```
|
||||
|
||||
- Wait for the PV, PVC, and Pod to be up:
|
||||
```bash
|
||||
watch kubectl get pv,pvc,pod
|
||||
```
|
||||
|
||||
- We can also check what's going on in the `openebs` namespace:
|
||||
```bash
|
||||
watch kubectl get pods --namespace openebs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Node failover
|
||||
|
||||
⚠️ This will partially break your cluster!
|
||||
|
||||
- We are going to disconnect the node running PostgreSQL from the cluster
|
||||
|
||||
- We will see what happens, and how to recover
|
||||
|
||||
- We will not reconnect the node to the cluster
|
||||
|
||||
- This whole lab will take at least 10-15 minutes (due to various timeouts)
|
||||
|
||||
⚠️ Only do this lab at the very end, when you don't want to run anything else after!
|
||||
|
||||
---
|
||||
|
||||
## Disconnecting the node from the cluster
|
||||
|
||||
.exercise[
|
||||
|
||||
- Find out where the Pod is running, and SSH into that node:
|
||||
```bash
|
||||
kubectl get pod postgres-0 -o jsonpath={.spec.nodeName}
|
||||
ssh nodeX
|
||||
```
|
||||
|
||||
- Check the name of the network interface:
|
||||
```bash
|
||||
sudo ip route ls default
|
||||
```
|
||||
|
||||
- The output should look like this:
|
||||
```
|
||||
default via 10.10.0.1 `dev ensX` proto dhcp src 10.10.0.13 metric 100
|
||||
```
|
||||
|
||||
- Shut down the network interface:
|
||||
```bash
|
||||
sudo ip link set ensX down
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Watch what's going on
|
||||
|
||||
- Let's look at the status of Nodes, Pods, and Events
|
||||
|
||||
.exercise[
|
||||
|
||||
- In a first pane/tab/window, check Nodes and Pods:
|
||||
```bash
|
||||
watch kubectl get nodes,pods -o wide
|
||||
```
|
||||
|
||||
- In another pane/tab/window, check Events:
|
||||
```bash
|
||||
kubectl get events --watch
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Node Ready → NotReady
|
||||
|
||||
- After \~30 seconds, the control plane stops receiving heartbeats from the Node
|
||||
|
||||
- The Node is marked NotReady
|
||||
|
||||
- It is not *schedulable* anymore
|
||||
|
||||
(the scheduler won't place new pods there, except some special cases)
|
||||
|
||||
- All Pods on that Node are also *not ready*
|
||||
|
||||
(they get removed from service Endpoints)
|
||||
|
||||
- ... But nothing else happens for now
|
||||
|
||||
(the control plane is waiting: maybe the Node will come back shortly?)
|
||||
|
||||
---
|
||||
|
||||
## Pod eviction
|
||||
|
||||
- After \~5 minutes, the control plane will evict most Pods from the Node
|
||||
|
||||
- These Pods are now `Terminating`
|
||||
|
||||
- The Pods controlled by e.g. ReplicaSets are automatically moved
|
||||
|
||||
(or rather: new Pods are created to replace them)
|
||||
|
||||
- But nothing happens to the Pods controlled by StatefulSets at this point
|
||||
|
||||
(they remain `Terminating` forever)
|
||||
|
||||
- Why? 🤔
|
||||
|
||||
--
|
||||
|
||||
- This is to avoid *split brain scenarios*
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Split brain 🧠⚡️🧠
|
||||
|
||||
- Imagine that we create a replacement pod `postgres-0` on another Node
|
||||
|
||||
- And 15 minutes later, the Node is reconnected and the original `postgres-0` comes back
|
||||
|
||||
- Which one is the "right" one?
|
||||
|
||||
- What if they have conflicting data?
|
||||
|
||||
😱
|
||||
|
||||
- We *cannot* let that happen!
|
||||
|
||||
- Kubernetes won't do it
|
||||
|
||||
- ... Unless we tell it to
|
||||
|
||||
---
|
||||
|
||||
## The Node is gone
|
||||
|
||||
- One thing we can do, is tell Kubernetes "the Node won't come back"
|
||||
|
||||
(there are other methods; but this one is the simplest one here)
|
||||
|
||||
- This is done with a simple `kubectl delete node`
|
||||
|
||||
.exercise[
|
||||
|
||||
- `kubectl delete` the Node that we disconnected
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Pod rescheduling
|
||||
|
||||
- Kubernetes removes the Node
|
||||
|
||||
- After a brief period of time (\~1 minute) the "Terminating" Pods are removed
|
||||
|
||||
- A replacement Pod is created on another Node
|
||||
|
||||
- ... But it doesn't start yet!
|
||||
|
||||
- Why? 🤔
|
||||
|
||||
---
|
||||
|
||||
## Multiple attachment
|
||||
|
||||
- By default, a disk can only be attached to one Node at a time
|
||||
|
||||
(sometimes it's a hardware or API limitation; sometimes enforced in software)
|
||||
|
||||
- In our Events, we should see `FailedAttachVolume` and `FailedMount` messages
|
||||
|
||||
- After \~5 more minutes, the disk will be force-detached from the old Node
|
||||
|
||||
- ... Which will allow attaching it to the new Node!
|
||||
|
||||
🎉
|
||||
|
||||
- The Pod will then be able to start
|
||||
|
||||
- Failover is complete!
|
||||
@@ -331,11 +331,8 @@ consul agent -data-dir=/consul/data -client=0.0.0.0 -server -ui \
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- consul
|
||||
matchLabels:
|
||||
app: consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
```
|
||||
|
||||
@@ -353,10 +350,7 @@ consul agent -data-dir=/consul/data -client=0.0.0.0 -server -ui \
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
command: [ "sh", "-c", "consul leave" ]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for Admins and Ops
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- static-pods-exercise
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
#- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
-
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/interco.md
|
||||
-
|
||||
- k8s/apilb.md
|
||||
#- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
#- k8s/cloud-controller-manager.md
|
||||
#- k8s/bootstrap.md
|
||||
- k8s/control-plane-auth.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
-
|
||||
#- k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,83 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for administrators
|
||||
and operators
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
- - k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
- - k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/interco.md
|
||||
- - k8s/apilb.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
# DAY 2
|
||||
- - k8s/kubercoins.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- - k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
###- k8s/bootstrap.md
|
||||
- k8s/netpol.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- - k8s/prometheus.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/crd.md
|
||||
- k8s/operators.md
|
||||
- k8s/eck.md
|
||||
###- k8s/operators-design.md
|
||||
# CONCLUSION
|
||||
- - k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
- |
|
||||
# (All content after this slide is bonus material)
|
||||
# EXTRA
|
||||
- - k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
@@ -1,81 +0,0 @@
|
||||
title: |
|
||||
Advanced
|
||||
Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- #1
|
||||
- k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/internal-apis.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
- #2
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/interco.md
|
||||
- #3
|
||||
- k8s/cni-internals.md
|
||||
- k8s/apilb.md
|
||||
- k8s/control-plane-auth.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- #4
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
- #5
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/crd.md
|
||||
- #6
|
||||
- k8s/ingress-tls.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/eck.md
|
||||
- #7
|
||||
- k8s/admission.md
|
||||
- k8s/kyverno.md
|
||||
- #8
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/hpa-v2.md
|
||||
- #9
|
||||
- k8s/operators-design.md
|
||||
- k8s/kubebuilder.md
|
||||
- k8s/events.md
|
||||
- k8s/finalizers.md
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/apiserver-deepdive.md
|
||||
#- k8s/record.md
|
||||
- shared/thankyou.md
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
#- k8s/dashboard.md
|
||||
#- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
#- k8s/dryrun.md
|
||||
#- k8s/exercise-yaml.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/rollout.md
|
||||
#- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
#- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/helm-intro.md
|
||||
#- k8s/helm-chart-format.md
|
||||
#- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
#- k8s/gitlab.md
|
||||
#- k8s/create-chart.md
|
||||
#- k8s/create-more-charts.md
|
||||
#- k8s/netpol.md
|
||||
#- k8s/authn-authz.md
|
||||
#- k8s/user-cert.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/podsecuritypolicy.md
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/crd.md
|
||||
#- k8s/admission.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/finalizers.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,84 +0,0 @@
|
||||
title: |
|
||||
Kubernetes 101
|
||||
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
# Bridget-specific; others use logistics.md
|
||||
- logistics-bridget.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
# Bridget doesn't go into as much depth with compose
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/setup-overview.md
|
||||
#- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- - k8s/kubectl-run.md
|
||||
#- k8s/batch-jobs.md
|
||||
#- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- - k8s/dashboard.md
|
||||
#- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
#- k8s/record.md
|
||||
- - k8s/logs-cli.md
|
||||
# Bridget hasn't added EFK yet
|
||||
#- k8s/logs-centralized.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/helm-intro.md
|
||||
#- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/netpol.md
|
||||
- k8s/whatsnext.md
|
||||
# - k8s/links.md
|
||||
# Bridget-specific
|
||||
- k8s/links-bridget.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,149 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Docker and Kubernetes
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
-
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
-
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
- k8s/yamldeploy.md
|
||||
-
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
-
|
||||
- k8s/ingress.md
|
||||
- k8s/ingress-tls.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
- k8s/gitlab.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- k8s/user-cert.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
- k8s/build-with-docker.md
|
||||
- k8s/build-with-kaniko.md
|
||||
-
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
-
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/hpa-v2.md
|
||||
-
|
||||
- k8s/extending-api.md
|
||||
- k8s/apiserver-deepdive.md
|
||||
- k8s/crd.md
|
||||
- k8s/aggregation-layer.md
|
||||
- k8s/admission.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- k8s/kubebuilder.md
|
||||
- k8s/sealed-secrets.md
|
||||
- k8s/kyverno.md
|
||||
- k8s/eck.md
|
||||
- k8s/finalizers.md
|
||||
- k8s/owners-and-dependents.md
|
||||
- k8s/events.md
|
||||
-
|
||||
- k8s/dmuc.md
|
||||
- k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/cni-internals.md
|
||||
- k8s/apilb.md
|
||||
- k8s/staticpods.md
|
||||
-
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,119 +0,0 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
-
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/k9s.md
|
||||
#- k8s/tilt.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
#- k8s/exercise-yaml.md
|
||||
-
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
- k8s/gitlab.md
|
||||
-
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/openid-connect.md
|
||||
#- k8s/podsecuritypolicy.md
|
||||
-
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/prometheus.md
|
||||
-
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/admission.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
-
|
||||
- k8s/whatsnext.md
|
||||
- k8s/lastwords.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
57
slides/lke.yml
Normal file
@@ -0,0 +1,57 @@
|
||||
title: |
|
||||
Cloud Native
|
||||
Continuous Deployment
|
||||
with GitLab, Helm, and
|
||||
Linode Kubernetes Engine
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-03-lke.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- lke/intro.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- lke/deploy-cluster.md
|
||||
- lke/kubernetes-review.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/accessinternal.md
|
||||
- lke/what-is-missing.md
|
||||
-
|
||||
- k8s/helm-intro.md
|
||||
- lke/external-dns.md
|
||||
- lke/traefik.md
|
||||
- lke/metrics-server.md
|
||||
#- k8s/prometheus.md
|
||||
- lke/prometheus.md
|
||||
- k8s/cert-manager.md
|
||||
- k8s/gitlab.md
|
||||
#- k8s/helm-chart-format.md
|
||||
#- k8s/helm-create-basic-chart.md
|
||||
#- k8s/helm-create-better-chart.md
|
||||
#- k8s/helm-dependencies.md
|
||||
#- k8s/helm-values-schema-validation.md
|
||||
#- k8s/helm-secrets.md
|
||||
#- k8s/exercise-helm.md
|
||||
-
|
||||
- shared/thankyou.md
|
||||
|
||||
|
||||
#grep md$ lke.yml | grep -v '#' | cut -d- -f2- | xargs subl3
|
||||
163
slides/lke/deploy-cluster.md
Normal file
@@ -0,0 +1,163 @@
|
||||
# Deploying our LKE cluster
|
||||
|
||||
- *If we wanted to deploy Kubernetes manually*, what would we need to do?
|
||||
|
||||
(not that I recommend doing that...)
|
||||
|
||||
- Control plane (etcd, API server, scheduler, controllers)
|
||||
|
||||
- Nodes (VMs with a container engine + the Kubelet agent; CNI setup)
|
||||
|
||||
- High availability (etcd clustering, API load balancer)
|
||||
|
||||
- Security (CA and TLS certificates everywhere)
|
||||
|
||||
- Cloud integration (to provision LoadBalancer services, storage...)
|
||||
|
||||
*And that's just to get a basic cluster!*
|
||||
|
||||
---
|
||||
|
||||
## The best way to deploy Kubernetes
|
||||
|
||||
*The best way to deploy Kubernetes is to get someone else to
|
||||
do it for us.*
|
||||
|
||||
(Me, ever since I've been working with Kubernetes)
|
||||
|
||||
---
|
||||
|
||||
## Managed Kubernetes
|
||||
|
||||
- Cloud provider runs the control plane
|
||||
|
||||
(including etcd, API load balancer, TLS setup, cloud integration)
|
||||
|
||||
- We run nodes
|
||||
|
||||
(the cloud provider generally gives us an easy way to provision them)
|
||||
|
||||
- Get started in *minutes*
|
||||
|
||||
- We're going to use [Linode Kubernetes Engine](https://www.linode.com/products/kubernetes/)
|
||||
|
||||
---
|
||||
|
||||
## Creating a cluster
|
||||
|
||||
- With the web console:
|
||||
|
||||
https://cloud.linode.com/kubernetes/clusters
|
||||
|
||||
- Pick the region of your choice
|
||||
|
||||
- Pick the latest available Kubernetes version
|
||||
|
||||
- Pick 3 nodes with 8 GB of RAM
|
||||
|
||||
- Click! ✨
|
||||
|
||||
- Wait a few minutes... ⌚️
|
||||
|
||||
- Download the kubeconfig file 💾
|
||||
|
||||
---
|
||||
|
||||
## With the CLI
|
||||
|
||||
- View available regions with `linode-cli regions list`
|
||||
|
||||
- View available server types with `linode-cli linodes types`
|
||||
|
||||
- View available Kubernetes versions with `linode-cli lke versions-list`
|
||||
|
||||
- Create cluster:
|
||||
```bash
|
||||
linode-cli lke cluster-create --label=hello-lke --region=us-east \
|
||||
--k8s_version=1.20 --node_pools.type=g6-standard-4 --node_pools.count=3
|
||||
```
|
||||
|
||||
- Note the cluster ID (e.g.: 12345)
|
||||
|
||||
- Download the kubeconfig file:
|
||||
```bash
|
||||
linode-cli lke kubeconfig-view `12345` --text --no-headers | base64 -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Communicating with the cluster
|
||||
|
||||
- All the Kubernetes tools (`kubectl`, but also `helm` etc) use the same config file
|
||||
|
||||
- That file is (by default) `$HOME/.kube/config`
|
||||
|
||||
- It can hold multiple cluster definitions (or *contexts*)
|
||||
|
||||
- Or, we can have multiple config files and switch between them:
|
||||
|
||||
- by adding the `--kubeconfig` flag each time we invoke a tool (🙄)
|
||||
|
||||
- or by setting the `KUBECONFIG` environment variable (☺️)
|
||||
|
||||
---
|
||||
|
||||
## Using the kubeconfig file
|
||||
|
||||
Option 1:
|
||||
|
||||
- move the kubeconfig file to e.g. `~/.kube/config.lke`
|
||||
|
||||
- set the environment variable: `export KUBECONFIG=~/.kube/config.lke`
|
||||
|
||||
Option 2:
|
||||
|
||||
- directly move the kubeconfig file to `~/.kube/config`
|
||||
|
||||
- **do not** do that if you already have a file there!
|
||||
|
||||
Option 3:
|
||||
|
||||
- merge the new kubeconfig file with our existing file
|
||||
|
||||
---
|
||||
|
||||
## Merging kubeconfig
|
||||
|
||||
- Assuming that we want to merge `~/.kube/config` and `~/.kube/config.lke` ...
|
||||
|
||||
- Move our existing kubeconfig file:
|
||||
```bash
|
||||
cp ~/.kube/config ~/.kube/config.old
|
||||
```
|
||||
|
||||
- Merge both files:
|
||||
```bash
|
||||
KUBECONFIG=~/.kube/config.old:~/.kube/config.lke kubectl config \
|
||||
view --raw > ~/.kube/config
|
||||
```
|
||||
|
||||
- Check that everything is there:
|
||||
```bash
|
||||
kubectl config get-contexts
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Are we there yet?
|
||||
|
||||
- Let's check if our control plane is available:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
|
||||
→ This should show the `kubernetes` `ClusterIP` service
|
||||
|
||||
- Look for our nodes:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
→ This should show 3 nodes (or whatever amount we picked earlier)
|
||||
|
||||
- If the nodes aren't visible yet, give them a minute to join the cluster
|
||||
108
slides/lke/external-dns.md
Normal file
@@ -0,0 +1,108 @@
|
||||
# [ExternalDNS](https://github.com/kubernetes-sigs/external-dns)
|
||||
|
||||
- ExternalDNS will automatically create DNS records from Kubernetes resources
|
||||
|
||||
- Services (with the annotation `external-dns.alpha.kubernetes.io/hostname`)
|
||||
|
||||
- Ingresses (automatically)
|
||||
|
||||
- It requires a domain name (obviously)
|
||||
|
||||
- ... And that domain name should be configurable through an API
|
||||
|
||||
- As of April 2021, it supports [a few dozens of providers](https://github.com/kubernetes-sigs/external-dns#status-of-providers)
|
||||
|
||||
- We're going to use Linode DNS
|
||||
|
||||
---
|
||||
|
||||
## Prep work
|
||||
|
||||
- We need a domain name
|
||||
|
||||
(if you need a cheap one, look e.g. at [GANDI](https://shop.gandi.net/?search=funwithlinode); there are many options below $10)
|
||||
|
||||
- That domain name should be configured to point to Linode DNS servers
|
||||
|
||||
(ns1.linode.com to ns5.linode.com)
|
||||
|
||||
- We need to generate a Linode API token with DNS API access
|
||||
|
||||
- Pro-tip: reduce the default TTL of the domain to 5 minutes!
|
||||
|
||||
---
|
||||
|
||||
## Deploying ExternalDNS
|
||||
|
||||
- The ExternalDNS documentation has a [tutorial](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/linode.md) for Linode
|
||||
|
||||
- ... It's basically a lot of YAML!
|
||||
|
||||
- That's where using a Helm chart will be very helpful
|
||||
|
||||
- There are a few ExternalDNS charts available out there
|
||||
|
||||
- We will use the one from Bitnami
|
||||
|
||||
(these folks maintain *a lot* of great Helm charts!)
|
||||
|
||||
---
|
||||
|
||||
## How we'll install things with Helm
|
||||
|
||||
- We will install each chart in its own namespace
|
||||
|
||||
(this is not mandatory, but it helps to see what belongs to what)
|
||||
|
||||
- We will use `helm upgrade --install` instead of `helm install`
|
||||
|
||||
(that way, if we want to change something, we can just re-run the command)
|
||||
|
||||
- We will use the `--create-namespace` and `--namespace ...` options
|
||||
|
||||
- To keep things boring and predictable, if we are installing chart `xyz`:
|
||||
|
||||
- we will install it in namespace `xyz`
|
||||
|
||||
- we will name the release `xyz` as well
|
||||
|
||||
---
|
||||
|
||||
## Installing ExternalDNS
|
||||
|
||||
- First, let's add the Bitnami repo:
|
||||
```bash
|
||||
helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
```
|
||||
|
||||
- Then, install ExternalDNS:
|
||||
```bash
|
||||
LINODE_API_TOKEN=`1234abcd...6789`
|
||||
helm upgrade --install external-dns bitnami/external-dns \
|
||||
--namespace external-dns --create-namespace \
|
||||
--set provider=linode \
|
||||
--set linode.apiToken=$LINODE_API_TOKEN
|
||||
```
|
||||
|
||||
(Make sure to update your API token above!)
|
||||
|
||||
---
|
||||
|
||||
## Testing ExternalDNS
|
||||
|
||||
- Let's annotate our NGINX service to expose it with a DNS record:
|
||||
```bash
|
||||
kubectl annotate service web \
|
||||
external-dns.alpha.kubernetes.io/hostname=nginx.`cloudnative.party`
|
||||
```
|
||||
|
||||
(make sure to use *your* domain name above, otherwise that won't work!)
|
||||
|
||||
- Check ExternalDNS logs:
|
||||
```bash
|
||||
kubectl logs -n external-dns -l app.kubernetes.io/name=external-dns
|
||||
```
|
||||
|
||||
- It might take a few minutes for ExternalDNS to start, patience!
|
||||
|
||||
- Then try to access `nginx.cloudnative.party` (or whatever domain you picked)
|
||||
175
slides/lke/intro.md
Normal file
@@ -0,0 +1,175 @@
|
||||
# Get ready!
|
||||
|
||||
- We're going to set up a whole Continuous Deployment pipeline
|
||||
|
||||
- ... for Kubernetes apps
|
||||
|
||||
- ... on a Kubernetes cluster
|
||||
|
||||
- Ingredients: cert-manager, GitLab, Helm, Linode DNS, LKE, Traefik
|
||||
|
||||
---
|
||||
|
||||
## Philosophy
|
||||
|
||||
- "Do one thing, do it well"
|
||||
|
||||
--
|
||||
|
||||
- ... But a CD pipeline is a complex system with interconnected parts!
|
||||
|
||||
- GitLab is no exception to that rule
|
||||
|
||||
- Let's have a look at its components!
|
||||
|
||||
---
|
||||
|
||||
## GitLab components
|
||||
|
||||
- GitLab dependencies listed in the GitLab official Helm chart
|
||||
|
||||
- External dependencies:
|
||||
|
||||
cert-manager, grafana, minio, nginx-ingress, postgresql, prometheus,
|
||||
redis, registry, shared-secrets
|
||||
|
||||
(these dependencies correspond to external charts not created by GitLab)
|
||||
|
||||
- Internal dependencies:
|
||||
|
||||
geo-logcursor, gitaly, gitlab-exporter, gitlab-grafana, gitlab-pages,
|
||||
gitlab-shell, kas, mailroom, migrations, operator, praefect, sidekiq,
|
||||
task-runner, webservice
|
||||
|
||||
(these dependencies correspond to subcharts embedded in the GitLab chart)
|
||||
|
||||
---
|
||||
|
||||
## Philosophy
|
||||
|
||||
- Use the GitLab chart to deploy everything that is specific to GitLab
|
||||
|
||||
- Deploy cluster-wide components separately
|
||||
|
||||
(cert-manager, ExternalDNS, Ingress Controller...)
|
||||
|
||||
---
|
||||
|
||||
## What we're going to do
|
||||
|
||||
- Spin up an LKE cluster
|
||||
|
||||
- Run a simple test app
|
||||
|
||||
- Install a few extras
|
||||
|
||||
(the cluster-wide components mentioned earlier)
|
||||
|
||||
- Set up GitLab
|
||||
|
||||
- Push an app with a CD pipeline to GitLab
|
||||
|
||||
---
|
||||
|
||||
## What you need to know
|
||||
|
||||
- If you just want to follow along and watch...
|
||||
|
||||
- container basics (what's an image, what's a container...)
|
||||
|
||||
- Kubernetes basics (what are Deployments, Namespaces, Pods, Services)
|
||||
|
||||
- If you want to run this on your own Kubernetes cluster...
|
||||
|
||||
- intermediate Kubernetes concepts (annotations, Ingresses)
|
||||
|
||||
- Helm basic concepts (how to install/upgrade releases; how to set "values")
|
||||
|
||||
- basic Kubernetes troubleshooting commands (view logs, events)
|
||||
|
||||
- There will be a lot of explanations and reminders along the way
|
||||
|
||||
---
|
||||
|
||||
## What you need to have
|
||||
|
||||
If you want to run this on your own...
|
||||
|
||||
- A Linode account
|
||||
|
||||
- A domain name that you will point to Linode DNS
|
||||
|
||||
(I got cloudnative.party for $5)
|
||||
|
||||
- Local tools to control your Kubernetes cluster:
|
||||
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
|
||||
|
||||
- [helm](https://helm.sh/docs/intro/install/)
|
||||
|
||||
- Patience, as many operations will require us to wait a few minutes!
|
||||
|
||||
---
|
||||
|
||||
## Do I really need a Linode account?
|
||||
|
||||
- *Can I use a local cluster, e.g. with Minikube?*
|
||||
|
||||
It will be very difficult to get valid TLS certs with a local cluster.
|
||||
|
||||
Also, GitLab needs quite a bit of resources.
|
||||
|
||||
- *Can I use another Kubernetes provider?*
|
||||
|
||||
You certainly can: Kubernetes is a standard platform!
|
||||
|
||||
But you'll have to adjust a few things.
|
||||
|
||||
(I'll try my best to tell you what as we go along.)
|
||||
|
||||
---
|
||||
|
||||
## Why do I need a domain name?
|
||||
|
||||
- Because accessing gitlab.cloudnative.party is easier than 102.34.55.67
|
||||
|
||||
- Because we'll need TLS certificates
|
||||
|
||||
(and it's very easy to obtain certs with Let's Encrypt when we have a domain)
|
||||
|
||||
- We'll illustrate automatic DNS configuration with ExternalDNS, too!
|
||||
|
||||
(Kubernetes will automatically create DNS entries in our domain)
|
||||
|
||||
---
|
||||
|
||||
## Nice-to-haves
|
||||
|
||||
Here are a few tools that I like...
|
||||
|
||||
- [linode-cli](https://github.com/linode/linode-cli#installation)
|
||||
to manage Linode resources from the command line
|
||||
|
||||
- [stern](https://github.com/stern/stern)
|
||||
to comfortably view logs of Kubernetes pods
|
||||
|
||||
- [k9s](https://k9scli.io/topics/install/)
|
||||
to manage Kubernetes resources with that retro BBS look and feel 😎
|
||||
|
||||
- [kube-ps1](https://github.com/jonmosco/kube-ps1)
|
||||
to keep track of which Kubernetes cluster and namespace we're working on
|
||||
|
||||
- [kubectx](https://github.com/ahmetb/kubectx)
|
||||
to easily switch between clusters, contexts, and namespaces
|
||||
|
||||
---
|
||||
|
||||
## Warning ⚠️💸
|
||||
|
||||
- We're going to spin up cloud resources
|
||||
|
||||
- Remember to shut them down when you're down!
|
||||
|
||||
- In the immortal words of Cloud Economist [Corey Quinn](https://twitter.com/QuinnyPig):
|
||||
|
||||
*[You're charged for what you forget to turn off.](https://www.theregister.com/2020/09/03/cloud_control_costs/)*
|
||||
168
slides/lke/kubernetes-review.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# Quick Kubernetes review
|
||||
|
||||
- Let's deploy a simple HTTP server
|
||||
|
||||
- And expose it to the outside world!
|
||||
|
||||
- Feel free to skip this section if you're familiar with Kubernetes
|
||||
|
||||
---
|
||||
|
||||
## Creating a container
|
||||
|
||||
- On Kubernetes, one doesn't simply run a container
|
||||
|
||||
- We need to create a "Pod"
|
||||
|
||||
- A Pod will be a group of containers running together
|
||||
|
||||
(often, it will be a group of *one* container)
|
||||
|
||||
- We can create a standalone Pod, but generally, we'll use a *controller*
|
||||
|
||||
(for instance: Deployment, Replica Set, Daemon Set, Job, Stateful Set...)
|
||||
|
||||
- The *controller* will take care of scaling and recreating the Pod if needed
|
||||
|
||||
(note that within a Pod, containers can also be restarted automatically if needed)
|
||||
|
||||
---
|
||||
|
||||
## A *controller*, you said?
|
||||
|
||||
- We're going to use one of the most common controllers: a *Deployment*
|
||||
|
||||
- Deployments...
|
||||
|
||||
- can be scaled (will create the requested number of Pods)
|
||||
|
||||
- will recreate Pods if e.g. they get evicted or their Node is down
|
||||
|
||||
- handle rolling updates
|
||||
|
||||
- Deployments actually delegate a lot of these tasks to *Replica Sets*
|
||||
|
||||
- We will generally have the following hierarchy:
|
||||
|
||||
Deployment → Replica Set → Pod
|
||||
|
||||
---
|
||||
|
||||
## Creating a Deployment
|
||||
|
||||
- Without further ado:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
- Check what happened:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
- Wait until the NGINX Pod is "Running"!
|
||||
|
||||
- Note: `kubectl create deployment` is great when getting started...
|
||||
|
||||
- ... But later, we will probably write YAML instead!
|
||||
|
||||
---
|
||||
|
||||
## Exposing the Deployment
|
||||
|
||||
- We need to create a Service
|
||||
|
||||
- We can use `kubectl expose` for that
|
||||
|
||||
(but, again, we will probably use YAML later!)
|
||||
|
||||
- For *internal* use, we can use the default Service type, ClusterIP:
|
||||
```bash
|
||||
kubectl expose deployment web --port=80
|
||||
```
|
||||
|
||||
- For *external* use, we can use a Service of type LoadBalancer:
|
||||
```bash
|
||||
kubectl expose deployment web --port=80 --type=LoadBalancer
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Changing the Service type
|
||||
|
||||
- We can `kubectl delete service web` and recreate it
|
||||
|
||||
- Or, `kubectl edit service web` and dive into the YAML
|
||||
|
||||
- Or, `kubectl patch service web --patch '{"spec": {"type": "LoadBalancer"}}'`
|
||||
|
||||
- ... These are just a few "classic" methods; there are many ways to do this!
|
||||
|
||||
---
|
||||
|
||||
## Deployment → Pod
|
||||
|
||||
- Can we check exactly what's going on when the Pod is created?
|
||||
|
||||
- Option 1: `watch kubectl get all`
|
||||
|
||||
- displays all object types
|
||||
- refreshes every 2 seconds
|
||||
- puts a high load on the API server when there are many objects
|
||||
|
||||
- Option 2: `kubectl get pods --watch --output-watch-events`
|
||||
|
||||
- can only display one type of object
|
||||
- will show all modifications happening (à la `tail -f`)
|
||||
- doesn't put a high load on the API server (except for initial display)
|
||||
|
||||
---
|
||||
|
||||
## Recreating the Deployment
|
||||
|
||||
- Let's delete our Deployment:
|
||||
```bash
|
||||
kubectl delete deployment web
|
||||
```
|
||||
|
||||
- Watch Pod updates:
|
||||
```bash
|
||||
kubectl get pods --watch --output-watch-events
|
||||
```
|
||||
|
||||
- Recreate the Deployment and see what Pods do:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Service stability
|
||||
|
||||
- Our Service *still works* even though we deleted and re-created the Deployment
|
||||
|
||||
- It wouldn't have worked while the Deployment was deleted, though
|
||||
|
||||
- A Service is a *stable endpoint*
|
||||
|
||||
???
|
||||
|
||||
:T: Warming up with a quick Kubernetes review
|
||||
|
||||
:Q: In Kubernetes, what is a Pod?
|
||||
:A: ✔️A basic unit of scaling that can contain one or more containers
|
||||
:A: An abstraction for an application and its dependencies
|
||||
:A: It's just a fancy name for "container" but they're the same
|
||||
:A: A group of cluster nodes used for scheduling purposes
|
||||
|
||||
:Q: In Kubernetes, what is a Replica Set?
|
||||
:A: ✔️A controller used to create one or multiple identical Pods
|
||||
:A: A numeric parameter in a Pod specification, used to scale that Pod
|
||||
:A: A group of containers running on the same node
|
||||
:A: A group of containers running on different nodes
|
||||
|
||||
:Q: In Kubernetes, what is a Deployment?
|
||||
:A: ✔️A controller that can manage Replica Sets corresponding to different configurations
|
||||
:A: A manifest telling Kubernetes how to deploy an app and its dependencies
|
||||
:A: A list of instructions executed in a container to configure that container
|
||||
:A: A basic unit of work for the Kubernetes scheduler
|
||||
147
slides/lke/metrics-server.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Installing metrics-server
|
||||
|
||||
- We've installed a few things on our cluster so far
|
||||
|
||||
- How many resources (CPU, RAM) are we using?
|
||||
|
||||
- We need metrics!
|
||||
|
||||
- If metrics-server is installed, we can get Nodes metrics like this:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- At the moment, this should show us `error: Metrics API not available`
|
||||
|
||||
- How do we fix this?
|
||||
|
||||
---
|
||||
|
||||
## Many ways to get metrics
|
||||
|
||||
- We could use a SaaS like Datadog, New Relic...
|
||||
|
||||
- We could use a self-hosted solution like Prometheus
|
||||
|
||||
- Or we could use metrics-server
|
||||
|
||||
- What's special about metrics-server?
|
||||
|
||||
---
|
||||
|
||||
## Pros/cons
|
||||
|
||||
Cons:
|
||||
|
||||
- no data retention (no history data, just instant numbers)
|
||||
|
||||
- only CPU and RAM of nodes and pods (no disk or network usage or I/O...)
|
||||
|
||||
Pros:
|
||||
|
||||
- very lightweight
|
||||
|
||||
- doesn't require storage
|
||||
|
||||
- used by Kubernetes autoscaling
|
||||
|
||||
---
|
||||
|
||||
## Why metrics-server
|
||||
|
||||
- We may install something fancier later
|
||||
|
||||
(think: Prometheus with Grafana)
|
||||
|
||||
- But metrics-server will work in *minutes*
|
||||
|
||||
- It will barely use resources on our cluster
|
||||
|
||||
- It's required for autoscaling anyway
|
||||
|
||||
---
|
||||
|
||||
## How metrics-server works
|
||||
|
||||
- It runs a single Pod
|
||||
|
||||
- That Pod will fetch metrics from all our Nodes
|
||||
|
||||
- It will expose them through the Kubernetes API aggregation layer
|
||||
|
||||
(we won't say much more about that aggregation layer; that's fairly advanced stuff!)
|
||||
|
||||
---
|
||||
|
||||
## Installing metrics-server
|
||||
|
||||
- In a lot of places, this is done with a little bit of custom YAML
|
||||
|
||||
(derived from the [official installation instructions](https://github.com/kubernetes-sigs/metrics-server#installation))
|
||||
|
||||
- We're going to use Helm one more time:
|
||||
```bash
|
||||
helm upgrade --install metrics-server bitnami/metrics-server \
|
||||
--create-namespace --namespace metrics-server \
|
||||
--set apiService.create=true \
|
||||
--set extraArgs.kubelet-insecure-tls=true \
|
||||
--set extraArgs.kubelet-preferred-address-types=InternalIP
|
||||
```
|
||||
|
||||
- What are these options for?
|
||||
|
||||
---
|
||||
|
||||
## Installation options
|
||||
|
||||
- `apiService.create=true`
|
||||
|
||||
  register `metrics-server` with the Kubernetes aggregation layer
|
||||
|
||||
(create an entry that will show up in `kubectl get apiservices`)
|
||||
|
||||
- `extraArgs.kubelet-insecure-tls=true`
|
||||
|
||||
when connecting to nodes to collect their metrics, don't check kubelet TLS certs
|
||||
|
||||
(because most kubelet certs include the node name, but not its IP address)
|
||||
|
||||
- `extraArgs.kubelet-preferred-address-types=InternalIP`
|
||||
|
||||
when connecting to nodes, use their internal IP address instead of node name
|
||||
|
||||
(because the latter requires an internal DNS, which is rarely configured)
|
||||
|
||||
---
|
||||
|
||||
## Testing metrics-server
|
||||
|
||||
- After a minute or two, metrics-server should be up
|
||||
|
||||
- We should now be able to check Nodes resource usage:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- And Pods resource usage, too:
|
||||
```bash
|
||||
kubectl top pods --all-namespaces
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Keep some padding
|
||||
|
||||
- The RAM usage that we see should correspond more or less to the Resident Set Size
|
||||
|
||||
- Our pods also need some extra space for buffers, caches...
|
||||
|
||||
- Do not aim for 100% memory usage!
|
||||
|
||||
- Some more realistic targets:
|
||||
|
||||
50% (for workloads with disk I/O and leveraging caching)
|
||||
|
||||
90% (on very big nodes with mostly CPU-bound workloads)
|
||||
|
||||
75% (anywhere in between!)
|
||||
120
slides/lke/prometheus.md
Normal file
@@ -0,0 +1,120 @@
|
||||
# Prometheus and Grafana
|
||||
|
||||
- What if we want metrics retention, view graphs, trends?
|
||||
|
||||
- A very popular combo is Prometheus+Grafana:
|
||||
|
||||
- Prometheus as the "metrics engine"
|
||||
|
||||
- Grafana to display comprehensive dashboards
|
||||
|
||||
- Prometheus also has an alert-manager component to trigger alerts
|
||||
|
||||
(we won't talk about that one)
|
||||
|
||||
---
|
||||
|
||||
## Installing Prometheus and Grafana
|
||||
|
||||
- A complete metrics stack needs at least:
|
||||
|
||||
- the Prometheus server (collects metrics and stores them efficiently)
|
||||
|
||||
- a collection of *exporters* (exposing metrics to Prometheus)
|
||||
|
||||
- Grafana
|
||||
|
||||
- a collection of Grafana dashboards (building them from scratch is tedious)
|
||||
|
||||
- The Helm chart `kube-prometheus-stack` combines all these elements
|
||||
|
||||
- ... So we're going to use it to deploy our metrics stack!
|
||||
|
||||
---
|
||||
|
||||
## Installing `kube-prometheus-stack`
|
||||
|
||||
- Let's install that stack *directly* from its repo
|
||||
|
||||
(without doing `helm repo add` first)
|
||||
|
||||
- Otherwise, keep the same naming strategy:
|
||||
```bash
|
||||
helm upgrade --install kube-prometheus-stack kube-prometheus-stack \
|
||||
--namespace kube-prometheus-stack --create-namespace \
|
||||
--repo https://prometheus-community.github.io/helm-charts
|
||||
```
|
||||
|
||||
- This will take a minute...
|
||||
|
||||
- Then check what was installed:
|
||||
```bash
|
||||
kubectl get all --namespace kube-prometheus-stack
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Exposing Grafana
|
||||
|
||||
- Let's create an Ingress for Grafana
|
||||
```bash
|
||||
kubectl create ingress --namespace kube-prometheus-stack grafana \
|
||||
--rule=grafana.`cloudnative.party`/*=kube-prometheus-stack-grafana:80
|
||||
```
|
||||
|
||||
(as usual, make sure to use *your* domain name above)
|
||||
|
||||
- Connect to Grafana
|
||||
|
||||
(remember that the DNS record might take a few minutes to come up)
|
||||
|
||||
---
|
||||
|
||||
## Grafana credentials
|
||||
|
||||
- What could the login and password be?
|
||||
|
||||
- Let's look at the Secrets available in the namespace:
|
||||
```bash
|
||||
kubectl get secrets --namespace kube-prometheus-stack
|
||||
```
|
||||
|
||||
- There is a `kube-prometheus-stack-grafana` that looks promising!
|
||||
|
||||
- Decode the Secret:
|
||||
```bash
|
||||
kubectl get secret --namespace kube-prometheus-stack \
|
||||
kube-prometheus-stack-grafana -o json | jq '.data | map_values(@base64d)'
|
||||
```
|
||||
|
||||
- If you don't have the `jq` tool mentioned above, don't worry...
|
||||
|
||||
--
|
||||
|
||||
- The login/password is hardcoded to `admin`/`prom-operator` 😬
|
||||
|
||||
---
|
||||
|
||||
## Grafana dashboards
|
||||
|
||||
- Once logged in, click on the "Dashboards" icon on the left
|
||||
|
||||
(it's the one that looks like four squares)
|
||||
|
||||
- Then click on the "Manage" entry
|
||||
|
||||
- Then click on "Kubernetes / Compute Resources / Cluster"
|
||||
|
||||
- This gives us a breakdown of resource usage by Namespace
|
||||
|
||||
- Feel free to explore the other dashboards!
|
||||
|
||||
???
|
||||
|
||||
:T: Observing our cluster with Prometheus and Grafana
|
||||
|
||||
:Q: What's the relationship between Prometheus and Grafana?
|
||||
:A: Prometheus collects and graphs metrics; Grafana sends alerts
|
||||
:A: ✔️Prometheus collects metrics; Grafana displays them on dashboards
|
||||
:A: Prometheus collects and graphs metrics; Grafana is its configuration interface
|
||||
:A: Grafana collects and graphs metrics; Prometheus sends alerts
|
||||
150
slides/lke/traefik.md
Normal file
@@ -0,0 +1,150 @@
|
||||
# Installing Traefik
|
||||
|
||||
- Traefik is going to be our Ingress Controller
|
||||
|
||||
- Let's install it with a Helm chart, in its own namespace
|
||||
|
||||
- First, let's add the Traefik chart repository:
|
||||
```bash
|
||||
helm repo add traefik https://helm.traefik.io/traefik
|
||||
```
|
||||
|
||||
- Then, install the chart:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true"
|
||||
```
|
||||
|
||||
(that option that we added enables HTTPS, it will be useful later!)
|
||||
|
||||
---
|
||||
|
||||
## Testing Traefik
|
||||
|
||||
- Let's create an Ingress resource!
|
||||
|
||||
- If we're using Kubernetes 1.20 or later, we can simply do this:
|
||||
```bash
|
||||
kubectl create ingress web \
|
||||
--rule=`ingress-is-fun.cloudnative.party`/*=web:80
|
||||
```
|
||||
|
||||
(make sure to update and use your own domain)
|
||||
|
||||
- Check that the Ingress was correctly created:
|
||||
```bash
|
||||
kubectl get ingress
|
||||
kubectl describe ingress
|
||||
```
|
||||
|
||||
- If we're using Kubernetes 1.19 or earlier, we'll need some YAML
|
||||
|
||||
---
|
||||
|
||||
## Creating an Ingress with YAML
|
||||
|
||||
- This is how we do it with YAML:
|
||||
```bash
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: web
|
||||
spec:
|
||||
rules:
|
||||
- host: `ingress-is-fun.cloudnative.party`
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: web
|
||||
servicePort: 80
|
||||
EOF
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Ingress versions...
|
||||
|
||||
- Note how we used the `v1beta1` Ingress version on the previous YAML
|
||||
|
||||
(to be compatible with older Kubernetes versions)
|
||||
|
||||
- This YAML will give you deprecation warnings on recent versions of Kubernetes
|
||||
|
||||
(since the Ingress spec is now at version `v1`)
|
||||
|
||||
- Don't worry too much about the deprecation warnings
|
||||
|
||||
(on Kubernetes, deprecation happens over a long time window, typically 1 year)
|
||||
|
||||
- You will have time to revisit and worry later! 😅
|
||||
|
||||
---
|
||||
|
||||
## Does it work?
|
||||
|
||||
- Try to connect to the Ingress host name
|
||||
|
||||
(in my example, http://ingress-is-fun.cloudnative.party/)
|
||||
|
||||
- *Normally,* it doesn't work (yet) 🤔
|
||||
|
||||
- Let's look at `kubectl get ingress` again
|
||||
|
||||
- ExternalDNS is trying to create records mapping HOSTS to ADDRESS
|
||||
|
||||
- But the ADDRESS field is currently empty!
|
||||
|
||||
- We need to tell Traefik to fill that ADDRESS field
|
||||
|
||||
---
|
||||
|
||||
## Reconfiguring Traefik
|
||||
|
||||
- There is a "magic" flag to tell Traefik to update the address status field
|
||||
|
||||
- Let's update our Traefik install:
|
||||
```bash
|
||||
helm upgrade --install traefik traefik/traefik \
|
||||
--create-namespace --namespace traefik \
|
||||
--set "ports.websecure.tls.enabled=true" \
|
||||
--set "providers.kubernetesIngress.publishedService.enabled=true"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Checking what we did
|
||||
|
||||
- Check the output of `kubectl get ingress`
|
||||
|
||||
(there should be an address now)
|
||||
|
||||
- Check the logs of ExternalDNS
|
||||
|
||||
(there should be a mention of the new DNS record)
|
||||
|
||||
- Try again to connect to the HTTP address
|
||||
|
||||
(now it should work)
|
||||
|
||||
- Note that some of these operations might take a minute or two
|
||||
|
||||
(be patient!)
|
||||
|
||||
???
|
||||
|
||||
:T: Installing the Traefik Ingress Controller
|
||||
|
||||
:Q: What's the job of an Ingress Controller?
|
||||
:A: Prevent unauthorized access to Kubernetes services
|
||||
:A: Firewall inbound traffic on the Kubernetes API
|
||||
:A: ✔️Handle inbound HTTP traffic for Kubernetes services
|
||||
:A: Keep track of the location of Kubernetes operators
|
||||
|
||||
:Q: What happens when we create an "Ingress resource"?
|
||||
:A: A web service is automatically deployed and scaled on our cluster
|
||||
:A: Kubernetes starts tracking the location of our users
|
||||
:A: Traffic coming from the specified addresses will be allowed
|
||||
:A: ✔️A load balancer is configured with HTTP traffic rules
|
||||
87
slides/lke/what-is-missing.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# DNS, Ingress, Metrics
|
||||
|
||||
- We got a basic app up and running
|
||||
|
||||
- We accessed it over a raw IP address
|
||||
|
||||
- Can we do better?
|
||||
|
||||
(i.e. access it with a domain name!)
|
||||
|
||||
- How many resources is it using?
|
||||
|
||||
---
|
||||
|
||||
## DNS
|
||||
|
||||
- We'd like to associate a fancy name to that LoadBalancer Service
|
||||
|
||||
(e.g. `nginx.cloudnative.party` → `A.B.C.D`)
|
||||
|
||||
- option 1: manually add a DNS record
|
||||
|
||||
- option 2: find a way to create DNS records automatically
|
||||
|
||||
- We will install ExternalDNS to automate DNS records creation
|
||||
|
||||
- ExternalDNS supports Linode DNS and dozens of other providers
|
||||
|
||||
---
|
||||
|
||||
## Ingress
|
||||
|
||||
- What if we have multiple web services to expose?
|
||||
|
||||
- We could create one LoadBalancer Service for each of them
|
||||
|
||||
- This would create a lot of cloud load balancers
|
||||
|
||||
(and they typically incur a cost, even if it's a small one)
|
||||
|
||||
- Instead, we can use an *Ingress Controller*
|
||||
|
||||
- Ingress Controller = HTTP load balancer / reverse proxy
|
||||
|
||||
- Put all our HTTP services behind a single LoadBalancer Service
|
||||
|
||||
- Can also do fancy "content-based" routing (using headers, request path...)
|
||||
|
||||
- We will install Traefik as our Ingress Controller
|
||||
|
||||
---
|
||||
|
||||
## Metrics
|
||||
|
||||
- How much resources are we using right now?
|
||||
|
||||
- When will we need to scale up our cluster?
|
||||
|
||||
- We need metrics!
|
||||
|
||||
- We're going to install the *metrics server*
|
||||
|
||||
- It's a very basic metrics system
|
||||
|
||||
(no retention, no graphs, no alerting...)
|
||||
|
||||
- But it's lightweight, and it is used internally by Kubernetes for autoscaling
|
||||
|
||||
---
|
||||
|
||||
## What's next
|
||||
|
||||
- We're going to install all these components
|
||||
|
||||
- Very often, things can be installed with a simple YAML file
|
||||
|
||||
- Very often, that YAML file needs to be customized a little bit
|
||||
|
||||
(add command-line parameters, provide API tokens...)
|
||||
|
||||
- Instead, we're going to use Helm charts
|
||||
|
||||
- Helm charts give us a way to customize what we deploy
|
||||
|
||||
- Helm can also keep track of what we install
|
||||
|
||||
(for easier uninstall and updates)
|
||||
@@ -1,35 +1,15 @@
|
||||
## Intros
|
||||
|
||||
- This slide should be customized by the tutorial instructor(s).
|
||||
- Hello! I'm Jérôme Petazzoni
|
||||
|
||||
- Hello! We are:
|
||||
([@jpetazzo](https://twitter.com/jpetazzo) on Twitter)
|
||||
|
||||
- .emoji[👩🏻🏫] Ann O'Nymous ([@...](https://twitter.com/...), Megacorp Inc)
|
||||
- I worked at Docker from \~2011 to 2018
|
||||
|
||||
- .emoji[👨🏾🎓] Stu Dent ([@...](https://twitter.com/...), University of Wakanda)
|
||||
- I'm now doing consulting, training, etc. on Docker & Kubernetes
|
||||
|
||||
<!-- .dummy[
|
||||
(check out [container.training](https://container.training/)!)
|
||||
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)
|
||||
- I'll show you how to deploy a complete CI/CD pipeline on LKE!
|
||||
|
||||
- .emoji[🚁] Alexandre ([@alexbuisine](https://twitter.com/alexbuisine), Enix SAS)
|
||||
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
|
||||
|
||||
- .emoji[⛵] Jérémy ([@jeremygarrouste](twitter.com/jeremygarrouste), Inpiwee)
|
||||
|
||||
- .emoji[🎧] Romain ([@rdegez](https://twitter.com/rdegez), Enix SAS)
|
||||
|
||||
] -->
|
||||
|
||||
- The workshop will run from ...
|
||||
|
||||
- There will be a lunch break at ...
|
||||
|
||||
(And coffee breaks!)
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
(Linode Kubernetes Engine 😎)
|
||||
|
||||
@@ -213,6 +213,7 @@ def processcontent(content, filename):
|
||||
return (content, titles)
|
||||
if os.path.isfile(content):
|
||||
return processcontent(open(content).read(), content)
|
||||
logging.warning("Content spans only one line (it's probably a file name) but no file found: {}".format(content))
|
||||
if isinstance(content, list):
|
||||
subparts = [processcontent(c, filename) for c in content]
|
||||
markdown = "\n---\n".join(c[0] for c in subparts)
|
||||
|
||||
12
slides/shared/chat-room-slack.md
Normal file
@@ -0,0 +1,12 @@
|
||||
## Chat room
|
||||
|
||||
- A Slack room has been set up for the duration of the training
|
||||
|
||||
- We'll use it to ask questions, get help, share feedback ...
|
||||
|
||||
(let's keep an eye on it during the training!)
|
||||
|
||||
- Reminder, the room is @@CHAT@@
|
||||
|
||||
- Say hi in the chat room!
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Our sample application
|
||||
|
||||
- We will clone the GitHub repository onto our `node1`
|
||||
- I'm going to run our demo app locally, with Docker
|
||||
|
||||
- The repository also contains scripts and tools that we will use through the workshop
|
||||
(you don't have to do that; do it if you like!)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -15,7 +15,7 @@ fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Clone the repository on `node1`:
|
||||
- Clone the repository:
|
||||
```bash
|
||||
git clone https://@@GITREPO@@
|
||||
```
|
||||
@@ -34,7 +34,7 @@ Let's start this before we look around, as downloading will take a little time..
|
||||
|
||||
- Go to the `dockercoins` directory, in the cloned repo:
|
||||
```bash
|
||||
cd ~/container.training/dockercoins
|
||||
cd container.training/dockercoins
|
||||
```
|
||||
|
||||
- Use Compose to build and run all containers:
|
||||
|
||||
@@ -6,6 +6,6 @@ Thank you!
|
||||
|
||||
class: title, in-person
|
||||
|
||||
That's all, folks! <br/> Questions?
|
||||
That's all, folks! <br/> Thank you ✨
|
||||
|
||||

|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-auto
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- - swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/gui.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,69 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-manual
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
#- swarm/hostingregistry.md
|
||||
#- swarm/testingregistry.md
|
||||
#- swarm/btp-manual.md
|
||||
#- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- swarm/updatingservices.md
|
||||
#- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/ipsec.md
|
||||
#- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
#- swarm/secrets.md
|
||||
#- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
#- swarm/stateful.md
|
||||
#- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,78 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- shared/logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,74 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
#- shared/logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
#- swarm/logging.md
|
||||
#- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -29,7 +29,7 @@ code {
|
||||
code.remark-code {
|
||||
font-size: 100%;
|
||||
}
|
||||
.exercise ul li code.remark-code.hljs.bash {
|
||||
.x-exercise ul li code.remark-code.hljs.bash {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
@@ -109,8 +109,17 @@ div.pic p {
|
||||
div.pic img {
|
||||
display: block;
|
||||
margin: auto;
|
||||
/*
|
||||
"pic" class slides should have a single, full screen picture.
|
||||
We used to have these attributes below but they prevented
|
||||
pictures from taking up the whole slide. Replacing them with
|
||||
100%/100% seems to put the pictures full screen, but I've left
|
||||
these old attributes here just in case.
|
||||
max-width: 1210px;
|
||||
max-height: 550px;
|
||||
*/
|
||||
max-width: 100%;
|
||||
max-height: 100%;
|
||||
}
|
||||
div.pic h1, div.pic h2, div.title h1, div.title h2 {
|
||||
text-align: center;
|
||||
@@ -168,7 +177,7 @@ div img {
|
||||
background-repeat: no-repeat;
|
||||
background-position: left;
|
||||
}
|
||||
.exercise {
|
||||
.x-exercise {
|
||||
background-color: #eee;
|
||||
background-image: url("images/keyboard.png");
|
||||
background-size: 1.4em;
|
||||
@@ -176,7 +185,7 @@ div img {
|
||||
background-position: 0.2em 0.2em;
|
||||
border: 2px dotted black;
|
||||
}
|
||||
.exercise:before {
|
||||
.x-exercise:before {
|
||||
content: "Exercise";
|
||||
margin-left: 1.8em;
|
||||
}
|
||||
|
||||