mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-03-24 20:16:57 +00:00
Compare commits
1 Commits
2021-06-mb
...
2021-04-su
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5947e5f5af |
@@ -1,6 +1,3 @@
|
||||
# Note: hyperkube isn't available after Kubernetes 1.18.
|
||||
# So we'll have to update this for Kubernetes 1.19!
|
||||
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: hackthecluster
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: hackthecluster
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: hackthecluster
|
||||
spec:
|
||||
volumes:
|
||||
- name: slash
|
||||
hostPath:
|
||||
path: /
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: alpine
|
||||
image: alpine
|
||||
volumeMounts:
|
||||
- name: slash
|
||||
mountPath: /hostfs
|
||||
command:
|
||||
- sleep
|
||||
- infinity
|
||||
securityContext:
|
||||
#privileged: true
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_CHROOT
|
||||
@@ -1,9 +1,5 @@
|
||||
export AWS_DEFAULT_OUTPUT=text
|
||||
|
||||
# Ignore SSH key validation when connecting to these remote hosts.
|
||||
# (Otherwise, deployment scripts break when a VM IP address reuse.)
|
||||
SSHOPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR"
|
||||
|
||||
HELP=""
|
||||
_cmd() {
|
||||
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
|
||||
@@ -126,7 +122,7 @@ _cmd_deploy() {
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
|
||||
pssh "
|
||||
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
|
||||
ssh $SSHOPTS \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
sudo -u docker tar -C /home/docker -xf-"
|
||||
|
||||
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
|
||||
@@ -170,27 +166,24 @@ _cmd_kubebins() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
##VERSION##
|
||||
ETCD_VERSION=v3.4.13
|
||||
K8SBIN_VERSION=v1.19.11 # Can't go to 1.20 because it requires a serviceaccount signing key.
|
||||
CNI_VERSION=v0.8.7
|
||||
pssh --timeout 300 "
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-amd64.tar.gz \
|
||||
##VERSION##
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-amd64.tar.gz \
|
||||
curl -L https://dl.k8s.io/v1.18.10/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-amd64-$CNI_VERSION.tgz \
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
@@ -259,7 +252,7 @@ _cmd_kube() {
|
||||
pssh --timeout 200 "
|
||||
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
|
||||
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
|
||||
TOKEN=\$(ssh $SSHOPTS \$FIRSTNODE cat /tmp/token) &&
|
||||
TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
|
||||
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
|
||||
fi"
|
||||
|
||||
@@ -330,7 +323,7 @@ EOF"
|
||||
# Install the AWS IAM authenticator
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
@@ -345,17 +338,13 @@ EOF"
|
||||
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
|
||||
fi"
|
||||
|
||||
# Install k9s
|
||||
# Install k9s and popeye
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/k9s ]; then
|
||||
VERSION=v0.24.10 &&
|
||||
FILENAME=k9s_\${VERSION}_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/download/\$VERSION/\$FILENAME |
|
||||
FILENAME=k9s_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/k9s/releases/latest/download/\$FILENAME |
|
||||
sudo tar -zxvf- -C /usr/local/bin k9s
|
||||
fi"
|
||||
|
||||
# Install popeye
|
||||
pssh "
|
||||
fi
|
||||
if [ ! -x /usr/local/bin/popeye ]; then
|
||||
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
|
||||
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
|
||||
@@ -435,7 +424,7 @@ _cmd_inventory() {
|
||||
case "$1" in
|
||||
"")
|
||||
for INFRA in infra/*; do
|
||||
$0 inventory $INFRA
|
||||
$0 list $INFRA
|
||||
done
|
||||
;;
|
||||
*/example.*)
|
||||
@@ -585,8 +574,7 @@ _cmd_ssh() {
|
||||
need_tag
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Logging into $IP"
|
||||
ssh $SSHOPTS docker@$IP
|
||||
|
||||
ssh docker@$IP
|
||||
}
|
||||
|
||||
_cmd start "Start a group of VMs"
|
||||
@@ -725,7 +713,7 @@ _cmd_tmux() {
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Opening ssh+tmux with $IP"
|
||||
rm -f /tmp/tmux-$UID/default
|
||||
ssh $SSHOPTS -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
|
||||
ssh -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
|
||||
}
|
||||
|
||||
_cmd helmprom "Install Helm and Prometheus"
|
||||
@@ -763,7 +751,11 @@ _cmd_passwords() {
|
||||
$0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
|
||||
info "Setting password for $nodes..."
|
||||
for node in $nodes; do
|
||||
echo docker:$password | ssh $SSHOPTS ubuntu@$node sudo chpasswd
|
||||
echo docker:$password | ssh \
|
||||
-o LogLevel=ERROR \
|
||||
-o UserKnownHostsFile=/dev/null \
|
||||
-o StrictHostKeyChecking=no \
|
||||
ubuntu@$node sudo chpasswd
|
||||
done
|
||||
done
|
||||
info "Done."
|
||||
@@ -897,7 +889,10 @@ test_vm() {
|
||||
"ls -la /home/docker/.ssh"; do
|
||||
sep "$cmd"
|
||||
echo "$cmd" \
|
||||
| ssh -A $SSHOPTS $user@$ip sudo -u docker -i \
|
||||
| ssh -A -q \
|
||||
-o "UserKnownHostsFile /dev/null" \
|
||||
-o "StrictHostKeyChecking=no" \
|
||||
$user@$ip sudo -u docker -i \
|
||||
|| {
|
||||
status=$?
|
||||
error "$cmd exit status: $status"
|
||||
|
||||
@@ -217,7 +217,7 @@ aws_tag_instances() {
|
||||
|
||||
aws_get_ami() {
|
||||
##VERSION##
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a ${AWS_ARCHITECTURE-amd64} -v 18.04 -t hvm:ebs -N -q
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
|
||||
}
|
||||
|
||||
aws_greet() {
|
||||
|
||||
@@ -1,28 +1,20 @@
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
COUNT=$1
|
||||
|
||||
cp terraform/*.tf tags/$TAG
|
||||
(
|
||||
cd tags/$TAG
|
||||
if ! terraform init; then
|
||||
error "'terraform init' failed."
|
||||
error "If it mentions the following error message:"
|
||||
error "openpgp: signature made by unknown entity."
|
||||
error "Then you need to upgrade Terraform to 0.11.15"
|
||||
error "to upgrade its signing keys following the"
|
||||
error "codecov breach."
|
||||
die "Aborting."
|
||||
fi
|
||||
echo prefix = \"$TAG\" >> terraform.tfvars
|
||||
echo count = \"$COUNT\" >> terraform.tfvars
|
||||
terraform apply -auto-approve
|
||||
terraform output ip_addresses > ips.txt
|
||||
)
|
||||
cp terraform/*.tf tags/$TAG
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform init
|
||||
echo prefix = \"$TAG\" >> terraform.tfvars
|
||||
echo count = \"$COUNT\" >> terraform.tfvars
|
||||
terraform apply -auto-approve
|
||||
terraform output ip_addresses > ips.txt
|
||||
)
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform destroy -auto-approve
|
||||
)
|
||||
}
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform destroy -auto-approve
|
||||
)
|
||||
}
|
||||
24
prepare-vms/settings/kube101.yaml
Normal file
24
prepare-vms/settings/kube101.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# 3 nodes for k8s 101 workshops
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -30,7 +30,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $STUDENTS
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl disabledocker $TAG
|
||||
@@ -45,7 +45,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
@@ -60,7 +60,7 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl disableaddrchecks $TAG
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
@@ -79,9 +79,10 @@ TAG=$PREFIX-$SETTINGS
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--students $STUDENTS
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
retry 5 ./workshopctl deploy $TAG
|
||||
retry 5 ./workshopctl kube $TAG 1.19.11
|
||||
retry 5 ./workshopctl kube $TAG 1.17.13
|
||||
retry 5 ./workshopctl webssh $TAG
|
||||
retry 5 ./workshopctl tailhist $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
33
slides/1.yml
Normal file
33
slides/1.yml
Normal file
@@ -0,0 +1,33 @@
|
||||
title: |
|
||||
Container Foundations
|
||||
|
||||
chat: "`#infra-summit-container-foundation`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-04-summit.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- # DAY 1
|
||||
- containers/Local_Environment.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
-
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
# FIXME update Compose section so that folks can test the app
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- shared/thankyou.md
|
||||
53
slides/2.yml
Normal file
53
slides/2.yml
Normal file
@@ -0,0 +1,53 @@
|
||||
title: |
|
||||
Advanced Containers
|
||||
|
||||
chat: "`#infra-summit-adv-containers`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-04-summit.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- # DAY 2
|
||||
- |
|
||||
# Dockerfile Homework Review
|
||||
|
||||
Yesterday, we concluded the "Container Foundations" workshop with
|
||||
a little homework assignment.
|
||||
|
||||
Let's look at it!
|
||||
|
||||
The repository is here:
|
||||
|
||||
https://github.com/jpetazzo/wordsmith
|
||||
|
||||
(You can find proposed solutions in various branches of this repository.)
|
||||
|
||||
- containers/Container_Networking_Basics.md
|
||||
- containers/Local_Development_Workflow.md
|
||||
- containers/Naming_And_Inspecting.md
|
||||
#- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
-
|
||||
- containers/Container_Network_Model.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
#- containers/Init_Systems.md
|
||||
- shared/thankyou.md
|
||||
#- containers/links.md
|
||||
-
|
||||
- |
|
||||
# (Extra Content)
|
||||
- containers/Start_And_Attach.md
|
||||
- one-more-thing.md
|
||||
@@ -2,7 +2,7 @@
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /kube.yml.html 200!
|
||||
/ /summit.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
1132
slides/autopilot/package-lock.json
generated
1132
slides/autopilot/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -10,9 +10,7 @@ Let's write Dockerfiles for an existing application!
|
||||
|
||||
4. Build and test them individually
|
||||
|
||||
<!--
|
||||
5. Test them together with the provided Compose file
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
@@ -77,7 +75,7 @@ That's all we care about for now!
|
||||
|
||||
Bonus question: make sure that each container stops correctly when hitting Ctrl-C.
|
||||
|
||||
???
|
||||
---
|
||||
|
||||
## Test with a Compose file
|
||||
|
||||
|
||||
@@ -125,27 +125,23 @@ root@04c0bb0a6c07:/# dpkg -l | wc -l
|
||||
|
||||
* `wc -l` counts them
|
||||
|
||||
How many packages do we have on our host?
|
||||
This is a fairly minimal Ubuntu install.
|
||||
|
||||
(We don't even have `ifconfig` or `ip`!)
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Counting packages on the host
|
||||
## If we were running Docker on Linux...
|
||||
|
||||
Exit the container by logging out of the shell, like you would usually do.
|
||||
Specifically, on Ubuntu...
|
||||
|
||||
(E.g. with `^D` or `exit`)
|
||||
We could compare with the number of packages on the host.
|
||||
|
||||
```bash
|
||||
root@04c0bb0a6c07:/# exit
|
||||
```
|
||||
(For comparison: about 500 on a stock Ubuntu Cloud Image.)
|
||||
|
||||
Now, try to:
|
||||
|
||||
* run `dpkg -l | wc -l`. How many packages are installed?
|
||||
|
||||
* run `figlet`. Does that work?
|
||||
We could see that installing `figlet` in the container *did not* install it on the host.
|
||||
|
||||
---
|
||||
|
||||
|
||||
57
slides/containers/Local_Environment.md
Normal file
57
slides/containers/Local_Environment.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Getting started
|
||||
|
||||
What are we going to learn?
|
||||
|
||||
A multi-layered approach! 🧅
|
||||
|
||||
- If you haven't used Docker yet: I'll show you how to get started.
|
||||
|
||||
- If you *have* already used Docker: I'll give you details, tips, tricks...
|
||||
|
||||
---
|
||||
|
||||
class: title
|
||||
|
||||
*Tell me and I forget.*
|
||||
<br/>
|
||||
*Teach me and I remember.*
|
||||
<br/>
|
||||
*Involve me and I learn.*
|
||||
|
||||
Misattributed to Benjamin Franklin
|
||||
|
||||
[(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/)
|
||||
|
||||
---
|
||||
|
||||
## Hands-on sections
|
||||
|
||||
- The whole workshop is hands-on
|
||||
|
||||
- We are going to build, ship, and run containers!
|
||||
|
||||
- The slides are here to help ...
|
||||
|
||||
- ... But most of the interesting action will be in the terminal
|
||||
|
||||
- You are invited to reproduce all the demos
|
||||
|
||||
(if you feel like it!)
|
||||
|
||||
---
|
||||
|
||||
## What do we need?
|
||||
|
||||
- Docker!
|
||||
|
||||
- on Linux
|
||||
|
||||
- on Mac with Intel Silicon
|
||||
|
||||
- on Mac with Apple Silicon
|
||||
|
||||
- on Windows
|
||||
|
||||
- For the purpose of this workshop, there shouldn't be significant differences
|
||||
|
||||
(but we'll talk about these differences a little bit later)
|
||||
@@ -11,10 +11,10 @@ class State(object):
|
||||
self.section_title = None
|
||||
self.section_start = 0
|
||||
self.section_slides = 0
|
||||
self.parts = {}
|
||||
self.modules = {}
|
||||
self.sections = {}
|
||||
def show(self):
|
||||
if self.section_title.startswith("part-"):
|
||||
if self.section_title.startswith("module-"):
|
||||
return
|
||||
print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
|
||||
self.sections[self.section_title] = self.section_slides
|
||||
@@ -38,10 +38,10 @@ for line in open(sys.argv[1]):
|
||||
if line == "--":
|
||||
state.current_slide += 1
|
||||
toc_links = re.findall("\(#toc-(.*)\)", line)
|
||||
if toc_links and state.section_title.startswith("part-"):
|
||||
if state.section_title not in state.parts:
|
||||
state.parts[state.section_title] = []
|
||||
state.parts[state.section_title].append(toc_links[0])
|
||||
if toc_links and state.section_title.startswith("module-"):
|
||||
if state.section_title not in state.modules:
|
||||
state.modules[state.section_title] = []
|
||||
state.modules[state.section_title].append(toc_links[0])
|
||||
# This is really hackish
|
||||
if line.startswith("class:"):
|
||||
for klass in EXCLUDED:
|
||||
@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):
|
||||
|
||||
state.show()
|
||||
|
||||
for part in sorted(state.parts, key=lambda f: int(f.split("-")[1])):
|
||||
part_size = sum(state.sections[s] for s in state.parts[part])
|
||||
print("{}\t{}\t{}".format("total size for", part, part_size))
|
||||
for module in sorted(state.modules, key=lambda f: int(f.split("-")[1])):
|
||||
module_size = sum(state.sections[s] for s in state.modules[module])
|
||||
print("{}\t{}\t{}".format("total size for", module, module_size))
|
||||
|
||||
|
||||
@@ -112,10 +112,7 @@ TEMPLATE="""<html>
|
||||
{% for item in all_past_workshops %}
|
||||
<tr>
|
||||
<td>{{ item.title }}</td>
|
||||
<td>{% if item.slides %}<a class="slides" href="{{ item.slides }}" />
|
||||
{% else %}
|
||||
<p class="details">{{ item.status }}</p>
|
||||
{% endif %}</td>
|
||||
<td><a class="slides" href="{{ item.slides }}" /></td>
|
||||
{% if item.video %}
|
||||
<td><a class="video" href="{{ item.video }}" /></td>
|
||||
{% endif %}
|
||||
|
||||
@@ -1,103 +1,3 @@
|
||||
- date: [2021-09-27, 2021-09-29]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2021-10-04, 2021-10-07]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/2.yml.html
|
||||
|
||||
- date: [2021-10-11, 2021-10-12]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2021-11-08, 2021-11-16]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2021-11-18, 2021-11-19]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
#slides: https://2021-05-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2021-05-10, 2021-05-12]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2021-05-17, 2021-05-20]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/2.yml.html
|
||||
|
||||
- date: [2021-05-24, 2021-05-25]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2021-05-26, 2021-05-28]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2021-05-31, 2021-06-01]
|
||||
country: www
|
||||
city: streaming
|
||||
event: ENIX SAS
|
||||
speaker: jpetazzo
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-05-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2021-02-08, 2021-02-10]
|
||||
country: www
|
||||
city: streaming
|
||||
@@ -106,7 +6,6 @@
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2021-02-15, 2021-02-18]
|
||||
country: www
|
||||
@@ -116,7 +15,6 @@
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/2.yml.html
|
||||
|
||||
- date: [2021-02-22, 2021-02-23]
|
||||
country: www
|
||||
@@ -126,7 +24,6 @@
|
||||
title: Packaging et CI/CD pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2021-02-24, 2021-02-26]
|
||||
country: www
|
||||
@@ -136,7 +33,6 @@
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2021-03-01, 2021-03-02]
|
||||
country: www
|
||||
@@ -146,7 +42,6 @@
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2021-02-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2020-10-05, 2020-10-06]
|
||||
country: www
|
||||
@@ -156,7 +51,6 @@
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2020-10-07, 2020-10-09]
|
||||
country: www
|
||||
@@ -166,7 +60,6 @@
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/2.yml.html
|
||||
|
||||
- date: 2020-10-12
|
||||
country: www
|
||||
@@ -176,7 +69,6 @@
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2020-10-13, 2020-10-14]
|
||||
country: www
|
||||
@@ -186,7 +78,6 @@
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2020-10-19, 2020-10-20]
|
||||
country: www
|
||||
@@ -196,7 +87,6 @@
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-10-enix.container.training/5.yml.html
|
||||
|
||||
- date: [2020-09-28, 2020-10-01]
|
||||
country: www
|
||||
@@ -205,7 +95,6 @@
|
||||
speaker: jpetazzo
|
||||
title: Advanced Kubernetes Concepts
|
||||
attend: https://skillsmatter.com/courses/700-advanced-kubernetes-concepts-workshop-jerome-petazzoni
|
||||
slides: https://2020-09-skillsmatter.container.training/
|
||||
|
||||
- date: [2020-08-29, 2020-08-30]
|
||||
country: www
|
||||
@@ -241,7 +130,6 @@
|
||||
title: Docker intensif (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/1.yml.html
|
||||
|
||||
- date: [2020-06-17, 2020-06-19]
|
||||
country: www
|
||||
@@ -251,7 +139,6 @@
|
||||
title: Fondamentaux Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/2.yml.html
|
||||
|
||||
- date: 2020-06-22
|
||||
country: www
|
||||
@@ -261,7 +148,6 @@
|
||||
title: Packaging pour Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/3.yml.html
|
||||
|
||||
- date: [2020-06-23, 2020-06-24]
|
||||
country: www
|
||||
@@ -271,7 +157,6 @@
|
||||
title: Kubernetes avancé (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/4.yml.html
|
||||
|
||||
- date: [2020-06-25, 2020-06-26]
|
||||
country: www
|
||||
@@ -281,8 +166,6 @@
|
||||
title: Opérer Kubernetes (en français)
|
||||
lang: fr
|
||||
attend: https://enix.io/fr/services/formation/online/
|
||||
slides: https://2020-06-enix.container.training/5.yml.html
|
||||
|
||||
|
||||
- date: [2020-06-09, 2020-06-11]
|
||||
country: www
|
||||
|
||||
@@ -187,7 +187,7 @@ Note: we can update a CRD without having to re-create the corresponding resource
|
||||
|
||||
---
|
||||
|
||||
## OpenAPI v3 schema example
|
||||
## OpenAPI v3 scheme exapmle
|
||||
|
||||
This is what we have in @@LINK[k8s/coffee-3.yaml]:
|
||||
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
# Exercise — sealed secrets
|
||||
|
||||
This is a "combo exercise" to practice the following concepts:
|
||||
|
||||
- Secrets (mounting them in containers)
|
||||
|
||||
- RBAC (granting specific permissions to specific users)
|
||||
|
||||
- Operators (specifically, sealed secrets)
|
||||
|
||||
- Migrations (copying/transferring resources from a cluster to another)
|
||||
|
||||
For this exercise, you will need two clusters.
|
||||
|
||||
(It can be two local clusters.)
|
||||
|
||||
We will call them "source cluster" and "target cluster".
|
||||
|
||||
---
|
||||
|
||||
## Step 1 (easy)
|
||||
|
||||
- Install the sealed secrets operator on both clusters
|
||||
|
||||
- On source cluster, create a Namespace called `dev`
|
||||
|
||||
- Create two sealed secrets, `verysecure` and `veryverysecure`
|
||||
|
||||
(the content doesn't matter; put a random string of your choice)
|
||||
|
||||
- Create a Deployment called `app` using both secrets
|
||||
|
||||
(use a mount or environment variables; whatever you prefer!)
|
||||
|
||||
- Verify that the secrets are available to the Deployment
|
||||
|
||||
---
|
||||
|
||||
## Step 2 (medium)
|
||||
|
||||
- Create another Namespace called `prod`
|
||||
|
||||
(on the source cluster)
|
||||
|
||||
- Create the same Deployment `app` using both secrets
|
||||
|
||||
- Verify that the secrets are available to the Deployment
|
||||
|
||||
---
|
||||
|
||||
## Step 3 (hard)
|
||||
|
||||
- On the target cluster, create a Namespace called `prod`
|
||||
|
||||
- Create the `app` Deployment and both sealed secrets
|
||||
|
||||
(do not copy the Secrets; only the sealed secrets)
|
||||
|
||||
- Check the next slide if you need a hint!
|
||||
|
||||
--
|
||||
|
||||
- You will have to copy the Sealed Secret private key
|
||||
|
||||
---
|
||||
|
||||
## Step 4 (medium)
|
||||
|
||||
On the target cluster, create the Namespace `dev`.
|
||||
|
||||
Let's say that user `alice` has access to the target cluster.
|
||||
|
||||
(You can use `kubectl --as=alice` to impersonate her.)
|
||||
|
||||
We want Alice to be able to:
|
||||
|
||||
- deploy the whole application
|
||||
|
||||
- access the `verysecure` secret
|
||||
|
||||
- but *not* the `veryverysecure` secret
|
||||
|
||||
---
|
||||
|
||||
## Step 5 (hard)
|
||||
|
||||
- Make sure that Alice can view the logs of the Deployment
|
||||
|
||||
- Can you think of a way for Alice to access the `veryverysecure` Secret?
|
||||
|
||||
(check next slide for a hint)
|
||||
|
||||
--
|
||||
|
||||
- `kubectl exec`, maybe?
|
||||
|
||||
--
|
||||
|
||||
- Can you think of a way to prevent that?
|
||||
@@ -160,8 +160,3 @@ class: extra-details
|
||||
- The problem was fixed in Kubernetes 1.13
|
||||
|
||||
*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.*
|
||||
|
||||
???
|
||||
|
||||
:EN:- Viewing logs with "kubectl logs"
|
||||
:FR:- Consulter les logs avec "kubectl logs"
|
||||
|
||||
@@ -1,182 +1,69 @@
|
||||
# Checking Node and Pod resource usage
|
||||
# Checking pod and node resource usage
|
||||
|
||||
- We've installed a few things on our cluster so far
|
||||
- Since Kubernetes 1.8, metrics are collected by the [resource metrics pipeline](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/)
|
||||
|
||||
- How much resources (CPU, RAM) are we using?
|
||||
- The resource metrics pipeline is:
|
||||
|
||||
- We need metrics!
|
||||
- optional (Kubernetes can function without it)
|
||||
|
||||
- necessary for some features (like the Horizontal Pod Autoscaler)
|
||||
|
||||
- exposed through the Kubernetes API using the [aggregation layer](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)
|
||||
|
||||
- usually implemented by the "metrics server"
|
||||
|
||||
---
|
||||
|
||||
## How to know if the metrics server is running?
|
||||
|
||||
- The easiest way to know is to run `kubectl top`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's try the following command:
|
||||
- Check if the core metrics pipeline is available:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If it shows our nodes and their CPU and memory load, we're good!
|
||||
|
||||
---
|
||||
|
||||
## Installing metrics server
|
||||
|
||||
- The metrics server doesn't have any particular requirements
|
||||
|
||||
(it doesn't need persistence, as it doesn't *store* metrics)
|
||||
|
||||
- It has its own repository, [kubernetes-incubator/metrics-server](https://github.com/kubernetes-incubator/metrics-server)
|
||||
|
||||
- The repository comes with [YAML files for deployment](https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy/1.8%2B)
|
||||
|
||||
- These files may not work on some clusters
|
||||
|
||||
(e.g. if your node names are not in DNS)
|
||||
|
||||
- The container.training repository has a [metrics-server.yaml](https://github.com/jpetazzo/container.training/blob/master/k8s/metrics-server.yaml#L90) file to help with that
|
||||
|
||||
(we can `kubectl apply -f` that file if needed)
|
||||
|
||||
---
|
||||
|
||||
## Showing container resource usage
|
||||
|
||||
- Once the metrics server is running, we can check container resource usage
|
||||
|
||||
.exercise[
|
||||
|
||||
- Show resource usage across all containers:
|
||||
```bash
|
||||
kubectl top pods --containers --all-namespaces
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is metrics-server installed?
|
||||
|
||||
- If we see a list of nodes, with CPU and RAM usage:
|
||||
|
||||
*great, metrics-server is installed!*
|
||||
|
||||
- If we see `error: Metrics API not available`:
|
||||
|
||||
*metrics-server isn't installed, so we'll install it!*
|
||||
|
||||
---
|
||||
|
||||
## The resource metrics pipeline
|
||||
|
||||
- The `kubectl top` command relies on the Metrics API
|
||||
|
||||
- The Metrics API is part of the "[resource metrics pipeline]"
|
||||
|
||||
- The Metrics API isn't served (built into) the Kubernetes API server
|
||||
|
||||
- It is made available through the [aggregation layer]
|
||||
|
||||
- It is usually served by a component called metrics-server
|
||||
|
||||
- It is optional (Kubernetes can function without it)
|
||||
|
||||
- It is necessary for some features (like the Horizontal Pod Autoscaler)
|
||||
|
||||
[resource metrics pipeline]: https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/
|
||||
[aggregation layer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
|
||||
|
||||
---
|
||||
|
||||
## Other ways to get metrics
|
||||
|
||||
- We could use a SaaS like Datadog, New Relic...
|
||||
|
||||
- We could use a self-hosted solution like Prometheus
|
||||
|
||||
- Or we could use metrics-server
|
||||
|
||||
- What's special about metrics-server?
|
||||
|
||||
---
|
||||
|
||||
## Pros/cons
|
||||
|
||||
Cons:
|
||||
|
||||
- no data retention (no history data, just instant numbers)
|
||||
|
||||
- only CPU and RAM of nodes and pods (no disk or network usage or I/O...)
|
||||
|
||||
Pros:
|
||||
|
||||
- very lightweight
|
||||
|
||||
- doesn't require storage
|
||||
|
||||
- used by Kubernetes autoscaling
|
||||
|
||||
---
|
||||
|
||||
## Why metrics-server
|
||||
|
||||
- We may install something fancier later
|
||||
|
||||
(think: Prometheus with Grafana)
|
||||
|
||||
- But metrics-server will work in *minutes*
|
||||
|
||||
- It will barely use resources on our cluster
|
||||
|
||||
- It's required for autoscaling anyway
|
||||
|
||||
---
|
||||
|
||||
## How metrics-server works
|
||||
|
||||
- It runs a single Pod
|
||||
|
||||
- That Pod will fetch metrics from all our Nodes
|
||||
|
||||
- It will expose them through the Kubernetes API aggregation layer
|
||||
|
||||
(we won't say much more about that aggregation layer; that's fairly advanced stuff!)
|
||||
|
||||
---
|
||||
|
||||
## Installing metrics-server
|
||||
|
||||
- In a lot of places, this is done with a little bit of custom YAML
|
||||
|
||||
(derived from the [official installation instructions](https://github.com/kubernetes-sigs/metrics-server#installation))
|
||||
|
||||
- We're going to use Helm one more time:
|
||||
```bash
|
||||
helm upgrade --install metrics-server bitnami/metrics-server \
|
||||
--create-namespace --namespace metrics-server \
|
||||
--set apiService.create=true \
|
||||
--set extraArgs.kubelet-insecure-tls=true \
|
||||
--set extraArgs.kubelet-preferred-address-types=InternalIP
|
||||
```
|
||||
|
||||
- What are these options for?
|
||||
|
||||
---
|
||||
|
||||
## Installation options
|
||||
|
||||
- `apiService.create=true`
|
||||
|
||||
register `metrics-server` with the Kubernetes aggregation layer
|
||||
|
||||
(create an entry that will show up in `kubectl get apiservices`)
|
||||
|
||||
- `extraArgs.kubelet-insecure-tls=true`
|
||||
|
||||
when connecting to nodes to collect their metrics, don't check kubelet TLS certs
|
||||
|
||||
(because most kubelet certs include the node name, but not its IP address)
|
||||
|
||||
- `extraArgs.kubelet-preferred-address-types=InternalIP`
|
||||
|
||||
when connecting to nodes, use their internal IP address instead of node name
|
||||
|
||||
(because the latter requires an internal DNS, which is rarely configured)
|
||||
|
||||
---
|
||||
|
||||
## Testing metrics-server
|
||||
|
||||
- After a minute or two, metrics-server should be up
|
||||
|
||||
- We should now be able to check Nodes resource usage:
|
||||
```bash
|
||||
kubectl top nodes
|
||||
```
|
||||
|
||||
- And Pods resource usage, too:
|
||||
```bash
|
||||
kubectl top pods --all-namespaces
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Keep some padding
|
||||
|
||||
- The RAM usage that we see should correspond more or less to the Resident Set Size
|
||||
|
||||
- Our pods also need some extra space for buffers, caches...
|
||||
|
||||
- Do not aim for 100% memory usage!
|
||||
|
||||
- Some more realistic targets:
|
||||
|
||||
50% (for workloads with disk I/O and leveraging caching)
|
||||
|
||||
90% (on very big nodes with mostly CPU-bound workloads)
|
||||
|
||||
75% (anywhere in between!)
|
||||
- We can also use selectors (`-l app=...`)
|
||||
|
||||
---
|
||||
|
||||
@@ -196,8 +83,5 @@ Pros:
|
||||
|
||||
???
|
||||
|
||||
:EN:- The resource metrics pipeline
|
||||
:EN:- Installing metrics-server
|
||||
|
||||
:EN:- Le *resource metrics pipeline*
|
||||
:FR:- Installation de metrics-server
|
||||
:EN:- The *core metrics pipeline*
|
||||
:FR:- Le *core metrics pipeline*
|
||||
|
||||
@@ -519,11 +519,3 @@ class: extra-details
|
||||
- The Pod will then be able to start
|
||||
|
||||
- Failover is complete!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Understanding Container Attached Storage (CAS)
|
||||
:EN:- Deploying stateful apps with OpenEBS
|
||||
|
||||
:FR:- Comprendre le "Container Attached Storage" (CAS)
|
||||
:FR:- Déployer une application "stateful" avec OpenEBS
|
||||
@@ -1,123 +0,0 @@
|
||||
# Prometheus and Grafana
|
||||
|
||||
- What if we want metrics retention, view graphs, trends?
|
||||
|
||||
- A very popular combo is Prometheus+Grafana:
|
||||
|
||||
- Prometheus as the "metrics engine"
|
||||
|
||||
- Grafana to display comprehensive dashboards
|
||||
|
||||
- Prometheus also has an alert-manager component to trigger alerts
|
||||
|
||||
(we won't talk about that one)
|
||||
|
||||
---
|
||||
|
||||
## Installing Prometheus and Grafana
|
||||
|
||||
- A complete metrics stack needs at least:
|
||||
|
||||
- the Prometheus server (collects metrics and stores them efficiently)
|
||||
|
||||
- a collection of *exporters* (exposing metrics to Prometheus)
|
||||
|
||||
- Grafana
|
||||
|
||||
- a collection of Grafana dashboards (building them from scratch is tedious)
|
||||
|
||||
- The Helm chart `kube-prometheus-stack` combines all these elements
|
||||
|
||||
- ... So we're going to use it to deploy our metrics stack!
|
||||
|
||||
---
|
||||
|
||||
## Installing `kube-prometheus-stack`
|
||||
|
||||
- Let's install that stack *directly* from its repo
|
||||
|
||||
(without doing `helm repo add` first)
|
||||
|
||||
- Otherwise, keep the same naming strategy:
|
||||
```bash
|
||||
helm upgrade --install kube-prometheus-stack kube-prometheus-stack \
|
||||
--namespace kube-prometheus-stack --create-namespace \
|
||||
--repo https://prometheus-community.github.io/helm-charts
|
||||
```
|
||||
|
||||
- This will take a minute...
|
||||
|
||||
- Then check what was installed:
|
||||
```bash
|
||||
kubectl get all --namespace kube-prometheus-stack
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Exposing Grafana
|
||||
|
||||
- Let's create an Ingress for Grafana
|
||||
```bash
|
||||
kubectl create ingress --namespace kube-prometheus-stack grafana \
|
||||
--rule=grafana.`cloudnative.party`/*=kube-prometheus-stack-grafana:80
|
||||
```
|
||||
|
||||
(as usual, make sure to use *your* domain name above)
|
||||
|
||||
- Connect to Grafana
|
||||
|
||||
(remember that the DNS record might take a few minutes to come up)
|
||||
|
||||
---
|
||||
|
||||
## Grafana credentials
|
||||
|
||||
- What could the login and password be?
|
||||
|
||||
- Let's look at the Secrets available in the namespace:
|
||||
```bash
|
||||
kubectl get secrets --namespace kube-prometheus-stack
|
||||
```
|
||||
|
||||
- There is a `kube-prometheus-stack-grafana` that looks promising!
|
||||
|
||||
- Decode the Secret:
|
||||
```bash
|
||||
kubectl get secret --namespace kube-prometheus-stack \
|
||||
kube-prometheus-stack-grafana -o json | jq '.data | map_values(@base64d)'
|
||||
```
|
||||
|
||||
- If you don't have the `jq` tool mentioned above, don't worry...
|
||||
|
||||
--
|
||||
|
||||
- The login/password is hardcoded to `admin`/`prom-operator` 😬
|
||||
|
||||
---
|
||||
|
||||
## Grafana dashboards
|
||||
|
||||
- Once logged in, click on the "Dashboards" icon on the left
|
||||
|
||||
(it's the one that looks like four squares)
|
||||
|
||||
- Then click on the "Manage" entry
|
||||
|
||||
- Then click on "Kubernetes / Compute Resources / Cluster"
|
||||
|
||||
- This gives us a breakdown of resource usage by Namespace
|
||||
|
||||
- Feel free to explore the other dashboards!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Installing Prometheus and Grafana
|
||||
:FR:- Installer Prometheus et Grafana
|
||||
|
||||
:T: Observing our cluster with Prometheus and Grafana
|
||||
|
||||
:Q: What's the relationship between Prometheus and Grafana?
|
||||
:A: Prometheus collects and graphs metrics; Grafana sends alerts
|
||||
:A: ✔️Prometheus collects metrics; Grafana displays them on dashboards
|
||||
:A: Prometheus collects and graphs metrics; Grafana is its configuration interface
|
||||
:A: Grafana collects and graphs metrics; Prometheus sends alerts
|
||||
@@ -66,7 +66,7 @@ class: extra-details
|
||||
|
||||
- Each request takes 1 second of CPU
|
||||
|
||||
- Average load: 1.66%
|
||||
- Average load: 0.16%
|
||||
|
||||
- Let's say we set a CPU limit of 10%
|
||||
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
title: |
|
||||
Kubernetes Training
|
||||
|
||||
chat: "Teams"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2021-06-mbition.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
-
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/exercise-wordsmith.md
|
||||
-
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
#- k8s/setup-managed.md
|
||||
#- k8s/setup-selfhosted.md
|
||||
-
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/volumes.md
|
||||
#- k8s/exercise-configmap.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
#- k8s/batch-jobs.md
|
||||
#- k8s/logs-centralized.md
|
||||
#- k8s/prometheus.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/operators.md
|
||||
#- k8s/operators-design.md
|
||||
#- k8s/staticpods.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/gitworkflows.md
|
||||
#- k8s/whatsnext.md
|
||||
#- k8s/lastwords.md
|
||||
- shared/thankyou.md
|
||||
#- k8s/links.md
|
||||
-
|
||||
- |
|
||||
# (Extra material)
|
||||
- k8s/ingress.md
|
||||
@@ -2,12 +2,22 @@
|
||||
|
||||
- Hello! I'm Jérôme Petazzoni ([@jpetazzo](https://twitter.com/jpetazzo))
|
||||
|
||||
- The training will run from 9:00 to 13:00
|
||||
- The workshop will run from 9am to noon (Pacific time)
|
||||
|
||||
- There will be a few breaks
|
||||
- There will be a short break around 10:30am
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
- I can't hear, read, or see you ... but I'm helped by folks who can!
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
- Make sure to join the Slack channel (@@CHAT@@)
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
- Feel free to ask questions at any time there!
|
||||
|
||||
- Your questions will be either answered, or relayed to me ✨
|
||||
|
||||
---
|
||||
|
||||
## Let's help each other
|
||||
|
||||

|
||||
|
||||
- If someone asks a question and you know the answer, please chime in!
|
||||
|
||||
@@ -42,7 +42,7 @@ def insertslide(markdown, title):
|
||||
|
||||
before = markdown[:slide_position]
|
||||
|
||||
toclink = "toc-part-{}".format(title2part[title])
|
||||
toclink = "toc-module-{}".format(title2path[title][0])
|
||||
_titles_ = [""] + all_titles + [""]
|
||||
currentindex = _titles_.index(title)
|
||||
previouslink = anchor(_titles_[currentindex-1])
|
||||
@@ -54,7 +54,7 @@ def insertslide(markdown, title):
|
||||
|
||||
class: pic
|
||||
|
||||
.interstitial[]
|
||||
.interstitial[]
|
||||
|
||||
---
|
||||
|
||||
@@ -64,11 +64,11 @@ class: title
|
||||
{title}
|
||||
|
||||
.nav[
|
||||
[Previous part](#{previouslink})
|
||||
[Previous section](#{previouslink})
|
||||
|
|
||||
[Back to table of contents](#{toclink})
|
||||
|
|
||||
[Next part](#{nextlink})
|
||||
[Next section](#{nextlink})
|
||||
]
|
||||
|
||||
.debug[(automatically generated title slide)]
|
||||
@@ -156,44 +156,43 @@ def generatefromyaml(manifest, filename):
|
||||
return html
|
||||
|
||||
|
||||
# Maps a title (the string just after "^# ") to its position in the TOC
|
||||
# (to which part it belongs).
|
||||
title2part = {}
|
||||
# Maps a section title (the string just after "^# ") to its position
|
||||
# in the table of content (as a (module,part,subpart,...) tuple).
|
||||
title2path = {}
|
||||
all_titles = []
|
||||
|
||||
# Generate the table of contents for a tree of titles.
|
||||
# "tree" is a list of titles, potentially nested.
|
||||
# Each entry is either:
|
||||
# - a title (then it's a top-level section that doesn't show up in the TOC)
|
||||
# - a list (then it's a part that will show up in the TOC on its own slide)
|
||||
# In a list, we can have:
|
||||
# - titles (simple entry)
|
||||
# - further lists (they are then flattened; we don't represent subsubparts)
|
||||
def gentoc(tree):
|
||||
# First, remove the top-level sections that don't show up in the TOC.
|
||||
tree = [ entry for entry in tree if type(entry)==list ]
|
||||
# Then, flatten the sublists.
|
||||
tree = [ list(flatten(entry)) for entry in tree ]
|
||||
# Now, process each part.
|
||||
parts = []
|
||||
for i, part in enumerate(tree):
|
||||
slide = "name: toc-part-{}\n\n".format(i+1)
|
||||
if len(tree) == 1:
|
||||
slide += "## Table of contents\n\n"
|
||||
def gentoc(tree, path=()):
|
||||
if not tree:
|
||||
return ""
|
||||
if isinstance(tree, str):
|
||||
logging.debug("Path {} Title {}".format(path, tree))
|
||||
title = tree
|
||||
title2path[title] = path
|
||||
all_titles.append(title)
|
||||
return "- [{}](#{})".format(title, anchor(title))
|
||||
if isinstance(tree, list):
|
||||
# If there is only one sub-element, give it index zero.
|
||||
# Otherwise, elements will have indices 1-to-N.
|
||||
offset = 0 if len(tree) == 1 else 1
|
||||
logging.debug(
|
||||
"Path {} Tree [...({} sub-elements)]"
|
||||
.format(path, len(tree)))
|
||||
if len(path) == 0:
|
||||
return "\n---\n".join(gentoc(subtree, path+(i+offset,)) for (i,subtree) in enumerate(tree))
|
||||
elif len(path) == 1:
|
||||
# If there is only one module, don't show "Module 1" but just "TOC"
|
||||
if path[0] == 0:
|
||||
label = "Table of contents"
|
||||
else:
|
||||
label = "Module {}".format(path[0])
|
||||
moduleslide = "name: toc-module-{n}\n\n## {label}\n\n".format(n=path[0], label=label)
|
||||
for (i,subtree) in enumerate(tree):
|
||||
moduleslide += gentoc(subtree, path+(i+offset,)) + "\n\n"
|
||||
moduleslide += ".debug[(auto-generated TOC)]"
|
||||
return moduleslide
|
||||
else:
|
||||
slide += "## Part {}\n\n".format(i+1)
|
||||
for title in part:
|
||||
logging.debug("Generating TOC, part {}, title {}.".format(i+1, title))
|
||||
title2part[title] = i+1
|
||||
all_titles.append(title)
|
||||
slide += "- [{}](#{})\n".format(title, anchor(title))
|
||||
# If we don't have too many subparts, add some space to breathe.
|
||||
# (Otherwise, we display the titles smooched together.)
|
||||
if len(part) < 10:
|
||||
slide += "\n"
|
||||
slide += "\n.debug[(auto-generated TOC)]"
|
||||
parts.append(slide)
|
||||
return "\n---\n".join(parts)
|
||||
return "\n\n".join(gentoc(subtree, path+(i+offset,)) for (i,subtree) in enumerate(tree))
|
||||
|
||||
|
||||
# Arguments:
|
||||
|
||||
12
slides/one-more-thing.md
Normal file
12
slides/one-more-thing.md
Normal file
@@ -0,0 +1,12 @@
|
||||
## One more thing ...
|
||||
|
||||
Actually, three more things!
|
||||
|
||||
- `docker system prune`
|
||||
|
||||
- `docker run --net host --pid host -v /:/host`
|
||||
|
||||
- "Docker on Apple Silicon and the Magic of BuildKit"
|
||||
|
||||
(Wednesday, 10am-10:30am)
|
||||
|
||||
27
slides/summit.html
Normal file
27
slides/summit.html
Normal file
@@ -0,0 +1,27 @@
|
||||
<?xml version="1.0"?>
|
||||
<html>
|
||||
<head>
|
||||
<style>
|
||||
td {
|
||||
background: #ccc;
|
||||
padding: 1em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<table>
|
||||
<tr>
|
||||
<td>Monday, April 26th 2021</td>
|
||||
<td>
|
||||
<a href="1.yml.html">Container Foundations</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Tuesday, April 27th, 2021</td>
|
||||
<td>
|
||||
<a href="2.yml.html">Advanced Containers</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
</body>
|
||||
</html>
|
||||
Reference in New Issue
Block a user