Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-03-02 17:30:20 +00:00)

Compare commits: wwrk-2019-... to sfsf-2019-... (36 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 08fa37dace | |
| | 807028cbf3 | |
| | dfde597cb9 | |
| | b21f61ad27 | |
| | 96419c6baf | |
| | 12da011f21 | |
| | fa1637fb7e | |
| | fbe2251e21 | |
| | bac0d9febd | |
| | b4faf10581 | |
| | 0ef9c87f97 | |
| | 313df8f9ff | |
| | ef6a5f05f8 | |
| | d71a636a9d | |
| | 990a873e81 | |
| | 98836d85cf | |
| | c959a4c4a1 | |
| | c3a796faef | |
| | 56cc65daf2 | |
| | a541e53c78 | |
| | 7a63dfb0cf | |
| | addd14582a | |
| | 093cfd1c24 | |
| | 8492524798 | |
| | 12b625d4f6 | |
| | a78e99d97e | |
| | 161b8aed7d | |
| | 4f1252d0b6 | |
| | 1b407cbc5e | |
| | dd6f3c9eee | |
| | d4afae54b8 | |
| | c1f9082fdc | |
| | 1fcb223a1d | |
| | 5299fbaab5 | |
| | 398ff5ee4f | |
| | b883e6d557 | |
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import os
 import sys
 import yaml

@@ -5,7 +5,7 @@ clustersize: 1
 clusterprefix: dmuc

 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html

 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4

@@ -5,7 +5,7 @@ clustersize: 3
 clusterprefix: kubenet

 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html

 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4

@@ -5,7 +5,7 @@ clustersize: 3
 clusterprefix: kuberouter

 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html

 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4

@@ -5,7 +5,7 @@ clustersize: 3
 clusterprefix: test

 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: admin.html
+cards_template: cards.html

 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: A4

@@ -1,29 +0,0 @@
-# Number of VMs per cluster
-clustersize: 1
-
-# The hostname of each node will be clusterprefix + a number
-clusterprefix: node
-
-# Jinja2 template to use to generate ready-to-cut cards
-cards_template: enix.html
-
-# Use "Letter" in the US, and "A4" everywhere else
-paper_size: A4
-
-# Feel free to reduce this if your printer can handle it
-paper_margin: 0.2in
-
-# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
-# If you print (or generate a PDF) using ips.html, they will be ignored.
-# (The equivalent parameters must be set from the browser's print dialog.)
-
-# This can be "test" or "stable"
-engine_version: stable
-
-# These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.21.1
-machine_version: 0.14.0
-
-# Password used to connect with the "docker user"
-docker_user_password: training
-

@@ -5,7 +5,7 @@ clustersize: 4
 clusterprefix: node

 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: jerome.html
+cards_template: cards.html

 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: Letter

@@ -7,7 +7,7 @@ clustersize: 3
 clusterprefix: node

 # Jinja2 template to use to generate ready-to-cut cards
-cards_template: kube101.html
+cards_template: cards.html

 # Use "Letter" in the US, and "A4" everywhere else
 paper_size: Letter

@@ -1,15 +1,20 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
INFRA=infra/aws-eu-west-3
|
||||
export AWS_INSTANCE_TYPE=t3a.small
|
||||
|
||||
INFRA=infra/aws-us-west-2
|
||||
|
||||
STUDENTS=2
|
||||
|
||||
TAG=admin-dmuc
|
||||
PREFIX=$(date +%Y-%m-%d-%H-%M)
|
||||
|
||||
SETTINGS=admin-dmuc
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$TAG.yaml \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $STUDENTS
|
||||
|
||||
./workshopctl deploy $TAG
|
||||
@@ -17,11 +22,12 @@ TAG=admin-dmuc
|
||||
./workshopctl kubebins $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
TAG=admin-kubenet
|
||||
SETTINGS=admin-kubenet
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$TAG.yaml \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
./workshopctl deploy $TAG
|
||||
@@ -29,11 +35,12 @@ TAG=admin-kubenet
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
TAG=admin-kuberouter
|
||||
SETTINGS=admin-kuberouter
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$TAG.yaml \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
./workshopctl deploy $TAG
|
||||
@@ -41,11 +48,12 @@ TAG=admin-kuberouter
|
||||
./workshopctl disableaddrchecks $TAG
|
||||
./workshopctl cards $TAG
|
||||
|
||||
TAG=admin-test
|
||||
SETTINGS=admin-test
|
||||
TAG=$PREFIX-$SETTINGS
|
||||
./workshopctl start \
|
||||
--tag $TAG \
|
||||
--infra $INFRA \
|
||||
--settings settings/$TAG.yaml \
|
||||
--settings settings/$SETTINGS.yaml \
|
||||
--count $((3*$STUDENTS))
|
||||
|
||||
./workshopctl deploy $TAG
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à un
|
||||
des environnements utilisés pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine
|
||||
virtuelle avec n'importe quel client SSH.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>cluster:</td></tr>
|
||||
<tr><td class="logpass">{{ clusterprefix }}</td></tr>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
</p>
|
||||
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>{{ clusterprefix }}{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,29 +1,88 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "orchestration workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_swarm -%}
|
||||
|
||||
{%- set url = "http://FIXME.container.training/" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- set lang = "en" -%}
|
||||
{%- set event = "training session" -%}
|
||||
{%- set backside = False -%}
|
||||
{%- set image = "kube" -%}
|
||||
{%- set clusternumber = 100 -%}
|
||||
|
||||
{%- set image_src = {
|
||||
"docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
|
||||
"swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
|
||||
"kube": "https://avatars1.githubusercontent.com/u/13629408",
|
||||
"enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
|
||||
}[image] -%}
|
||||
{%- if lang == "en" and clustersize == 1 -%}
|
||||
{%- set intro -%}
|
||||
Here is the connection information to your very own
|
||||
machine for this {{ event }}.
|
||||
You can connect to this VM with any SSH client.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Your machine is:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "en" and clustersize != 1 -%}
|
||||
{%- set intro -%}
|
||||
Here is the connection information to your very own
|
||||
cluster for this {{ event }}.
|
||||
You can connect to each VM with any SSH client.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Your machines are:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "fr" and clustersize == 1 -%}
|
||||
{%- set intro -%}
|
||||
Voici les informations permettant de se connecter à votre
|
||||
machine pour cette formation.
|
||||
Vous pouvez vous connecter à cette machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Adresse IP:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "en" and clusterprefix != "node" -%}
|
||||
{%- set intro -%}
|
||||
Here is the connection information for the
|
||||
<strong>{{ clusterprefix }}</strong> environment.
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "fr" and clustersize != 1 -%}
|
||||
{%- set intro -%}
|
||||
Voici les informations permettant de se connecter à votre
|
||||
cluster pour cette formation.
|
||||
Vous pouvez vous connecter à chaque machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
{%- endset -%}
|
||||
{%- set listhead -%}
|
||||
Adresses IP:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "en" -%}
|
||||
{%- set slides_are_at -%}
|
||||
You can find the slides at:
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
{%- if lang == "fr" -%}
|
||||
{%- set slides_are_at -%}
|
||||
Le support de formation est à l'adresse suivante :
|
||||
{%- endset -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 14px;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
@@ -37,24 +96,54 @@ table {
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
{% if backside %}
|
||||
height: 31%;
|
||||
{% endif %}
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
/*
|
||||
width: 21.5%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
*/
|
||||
/**/
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
/**/
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
div.back {
|
||||
border: 1px dotted white;
|
||||
}
|
||||
|
||||
div.back p {
|
||||
margin: 0.5em 1em 0 1em;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.4em;
|
||||
margin-right: -0.2em;
|
||||
}
|
||||
|
||||
/*
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
*/
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
@@ -69,19 +158,15 @@ img {
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>{{ intro }}</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
{% if clusternumber != None %}
|
||||
<tr><td>cluster:</td></tr>
|
||||
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
|
||||
{% endif %}
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
@@ -90,17 +175,44 @@ img {
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
{{ listhead }}
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
<tr>
|
||||
<td>{{ clusterprefix }}{{ loop.index }}:</td>
|
||||
<td>{{ node }}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
|
||||
<p>
|
||||
{{ slides_are_at }}
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% if loop.index%pagesize==0 or loop.last %}
|
||||
<span class="pagebreak"></span>
|
||||
{% if backside %}
|
||||
{% for x in range(pagesize) %}
|
||||
<div class="back">
|
||||
<br/>
|
||||
<p>You got this at the workshop
|
||||
"Getting Started With Kubernetes and Container Orchestration"
|
||||
during QCON London (March 2019).</p>
|
||||
<p>If you liked that workshop,
|
||||
I can train your team or organization
|
||||
on Docker, container, and Kubernetes,
|
||||
with curriculums of 1 to 5 days.
|
||||
</p>
|
||||
<p>Interested? Contact me at:</p>
|
||||
<p>jerome.petazzoni@gmail.com</p>
|
||||
<p>Thank you!</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,121 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à votre
|
||||
{{ cluster_or_machine }} pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
</p>
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,134 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://qconuk2019.container.training/" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1.0em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
height: 31%;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
div.back {
|
||||
border: 1px dotted white;
|
||||
}
|
||||
|
||||
div.back p {
|
||||
margin: 0.5em 1em 0 1em;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.8em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 5em;
|
||||
float: right;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% if loop.index%pagesize==0 or loop.last %}
|
||||
<span class="pagebreak"></span>
|
||||
{% for x in range(pagesize) %}
|
||||
<div class="back">
|
||||
<br/>
|
||||
<p>You got this at the workshop
|
||||
"Getting Started With Kubernetes and Container Orchestration"
|
||||
during QCON London (March 2019).</p>
|
||||
<p>If you liked that workshop,
|
||||
I can train your team or organization
|
||||
on Docker, container, and Kubernetes,
|
||||
with curriculums of 1 to 5 days.
|
||||
</p>
|
||||
<p>Interested? Contact me at:</p>
|
||||
<p>jerome.petazzoni@gmail.com</p>
|
||||
<p>Thank you!</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,106 +0,0 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://container.training/" -%}
|
||||
{%- set pagesize = 12 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 21.5%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.4em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -2,4 +2,4 @@
 #/ /kube-halfday.yml.html 200
 #/ /kube-fullday.yml.html 200
 #/ /kube-twodays.yml.html 200
-/ /kadm-fourdays.yml.html 200!
+/ /sfsf.yml.html 200!

@@ -86,7 +86,7 @@ like Windows, macOS, Solaris, FreeBSD ...

 * No notion of image (container filesystems have to be managed manually).

-* Networking has to be setup manually.
+* Networking has to be set up manually.

 ---

@@ -112,7 +112,7 @@ like Windows, macOS, Solaris, FreeBSD ...

 * Strong emphasis on security (through privilege separation).

-* Networking has to be setup separately (e.g. through CNI plugins).
+* Networking has to be set up separately (e.g. through CNI plugins).

 * Partial image management (pull, but no push).

@@ -152,7 +152,7 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).

 * Basic image support (tar archives and raw disk images).

-* Network has to be setup manually.
+* Network has to be set up manually.

 ---

|
||||
@@ -31,6 +31,7 @@
   title: Kubernetes for administrators and operators
   speaker: jpetazzo
   attend: https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/75313
+  slides: https://kadm-2019-06.container.training/

 - date: 2019-05-01
   country: us

@@ -1,4 +1,4 @@
-# Kubernetes architecture deep dive
+# Kubernetes architecture

 We can arbitrarily split Kubernetes in two parts:

@@ -167,11 +167,13 @@ What does that mean?

 ## Let's experiment a bit!

-- For the exercises in this section, connect to the first node of the `test` cluster
+- For this section, we will use a cluster with 4 nodes
+
+  (named node1, node2, node3, node4)

 .exercise[

-- SSH to the first node of the test cluster
+- SSH to the first node of the cluster

 - Check that the cluster is operational:
   ```bash

@@ -22,7 +22,7 @@
|
||||
|
||||
- When the API server receives a request, it tries to authenticate it
|
||||
|
||||
(it examines headers, certificates ... anything available)
|
||||
(it examines headers, certificates... anything available)
|
||||
|
||||
- Many authentication methods are available and can be used simultaneously
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
- the user ID
|
||||
- a list of groups
|
||||
|
||||
- The API server doesn't interpret these; it'll be the job of *authorizers*
|
||||
- The API server doesn't interpret these; that'll be the job of *authorizers*
|
||||
|
||||
---
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
|
||||
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication)
|
||||
|
||||
(carrying user and password in a HTTP header)
|
||||
(carrying user and password in an HTTP header)
|
||||
|
||||
- Authentication proxy
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
|
||||
(i.e. they are not stored in etcd or anywhere else)
|
||||
|
||||
- Users can be created (and given membership to groups) independently of the API
|
||||
- Users can be created (and added to groups) independently of the API
|
||||
|
||||
- The Kubernetes API can be set up to use your custom CA to validate client certs
|
||||
|
||||
@@ -193,7 +193,7 @@ class: extra-details
|
||||
|
||||
(the kind that you can view with `kubectl get secrets`)
|
||||
|
||||
- Service accounts are generally used to grant permissions to applications, services ...
|
||||
- Service accounts are generally used to grant permissions to applications, services...
|
||||
|
||||
(as opposed to humans)
|
||||
|
||||
@@ -217,7 +217,7 @@ class: extra-details
|
||||
|
||||
.exercise[
|
||||
|
||||
- The resource name is `serviceaccount` or `sa` in short:
|
||||
- The resource name is `serviceaccount` or `sa` for short:
|
||||
```bash
|
||||
kubectl get sa
|
||||
```
|
||||
@@ -309,7 +309,7 @@ class: extra-details
|
||||
|
||||
- The API "sees" us as a different user
|
||||
|
||||
- But neither user has any right, so we can't do nothin'
|
||||
- But neither user has any rights, so we can't do nothin'
|
||||
|
||||
- Let's change that!
|
||||
|
||||
@@ -339,9 +339,9 @@ class: extra-details
|
||||
|
||||
- A rule is a combination of:
|
||||
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete ...
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete...
|
||||
|
||||
- resources (as in "API resource", like pods, nodes, services ...)
|
||||
- resources (as in "API resource," like pods, nodes, services...)
|
||||
|
||||
- resource names (to specify e.g. one specific pod instead of all pods)
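As an illustrative sketch (not part of the original slides), such a rule can be created imperatively; the role, binding, and user names below are hypothetical:

```bash
# Create a Role allowing read-only access to pods in the current namespace
kubectl create role pod-viewer --verb=get,list,watch --resource=pods

# Bind that Role to a (hypothetical) user named jean.doe
kubectl create rolebinding pod-viewer-jean --role=pod-viewer --user=jean.doe
```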
|
||||
|
||||
@@ -375,13 +375,13 @@ class: extra-details
|
||||
|
||||
- We can also define API resources ClusterRole and ClusterRoleBinding
|
||||
|
||||
- These are a superset, allowing to:
|
||||
- These are a superset, allowing us to:
|
||||
|
||||
- specify actions on cluster-wide objects (like nodes)
|
||||
|
||||
- operate across all namespaces
|
||||
|
||||
- We can create Role and RoleBinding resources within a namespaces
|
||||
- We can create Role and RoleBinding resources within a namespace
|
||||
|
||||
- ClusterRole and ClusterRoleBinding resources are global
|
||||
|
||||
@@ -389,13 +389,13 @@ class: extra-details
|
||||
|
||||
## Pods and service accounts
|
||||
|
||||
- A pod can be associated to a service account
|
||||
- A pod can be associated with a service account
|
||||
|
||||
- by default, it is associated to the `default` service account
|
||||
- by default, it is associated with the `default` service account
|
||||
|
||||
- as we've seen earlier, this service account has no permission anyway
|
||||
- as we saw earlier, this service account has no permissions anyway
|
||||
|
||||
- The associated token is exposed into the pod's filesystem
|
||||
- The associated token is exposed to the pod's filesystem
|
||||
|
||||
(in `/var/run/secrets/kubernetes.io/serviceaccount/token`)
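For instance (a hedged sketch, using a throwaway pod created only for this check), the mounted token can be inspected like this:

```bash
# Start a throwaway pod, read the mounted service account token, then clean up
kubectl run tokentest --image=alpine --restart=Never -- sleep 3600
kubectl wait --for=condition=Ready pod/tokentest
kubectl exec tokentest -- cat /var/run/secrets/kubernetes.io/serviceaccount/token
kubectl delete pod tokentest
```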
|
||||
|
||||
@@ -460,7 +460,7 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
It's important to note a couple of details in these flags ...
|
||||
It's important to note a couple of details in these flags...
|
||||
|
||||
---
|
||||
|
||||
@@ -493,13 +493,13 @@ It's important to note a couple of details in these flags ...
|
||||
|
||||
- again, the command would have worked fine (no error)
|
||||
|
||||
- ... but our API requests would have been denied later
|
||||
- ...but our API requests would have been denied later
|
||||
|
||||
- What's about the `default:` prefix?
|
||||
|
||||
- that's the namespace of the service account
|
||||
|
||||
- yes, it could be inferred from context, but ... `kubectl` requires it
|
||||
- yes, it could be inferred from context, but... `kubectl` requires it
|
||||
|
||||
---
|
||||
|
||||
@@ -590,7 +590,7 @@ class: extra-details
|
||||
|
||||
*In many situations, these roles will be all you need.*
|
||||
|
||||
*You can also customize them if needed!*
|
||||
*You can also customize them!*
|
||||
|
||||
---
|
||||
|
||||
@@ -652,7 +652,7 @@ class: extra-details
|
||||
kubectl describe clusterrolebinding cluster-admin
|
||||
```
|
||||
|
||||
- This binding associates `system:masters` to the cluster role `cluster-admin`
|
||||
- This binding associates `system:masters` with the cluster role `cluster-admin`
|
||||
|
||||
- And the `cluster-admin` is, basically, `root`:
|
||||
```bash
|
||||
@@ -667,7 +667,7 @@ class: extra-details
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
|
||||
- Here is a proof-of-concept tool by Aqua Security, doing exactly that:
|
||||
- There is a proof-of-concept tool by Aqua Security which does exactly that:
|
||||
|
||||
https://github.com/aquasecurity/kubectl-who-can
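Hypothetical usage sketch (assuming the plugin is installed, e.g. through krew; the exact invocation may differ between versions):

```bash
# Who can create deployments in the current namespace?
kubectl who-can create deployments

# Who can read secrets in kube-system?
kubectl who-can get secrets -n kube-system
```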
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# TLS bootstrap (extra material)
|
||||
# TLS bootstrap
|
||||
|
||||
- kubelet needs TLS keys and certificates to communicate with the control plane
|
||||
|
||||
|
||||
@@ -20,15 +20,15 @@
|
||||
|
||||
- Configuring routing tables in the cloud network (specific to GCE)
|
||||
|
||||
- Updating node labels to indicate region, zone, instance type ...
|
||||
- Updating node labels to indicate region, zone, instance type...
|
||||
|
||||
- Obtain node name, internal and external addresses from cloud metadata service
|
||||
|
||||
- Deleting nodes from Kubernetes when they're deleted in the cloud
|
||||
|
||||
- Managing *some* volumes (e.g. ELBs, AzureDisks ...)
|
||||
- Managing *some* volumes (e.g. ELBs, AzureDisks...)
|
||||
|
||||
(Eventually, volumes will be managed by the CSI)
|
||||
(Eventually, volumes will be managed by the Container Storage Interface)
|
||||
|
||||
---
|
||||
|
||||
@@ -81,6 +81,16 @@ The list includes the following providers:
|
||||
|
||||
---
|
||||
|
||||
## Audience questions
|
||||
|
||||
- What kind of clouds are you using/planning to use?
|
||||
|
||||
- What kind of details would you like to see in this section?
|
||||
|
||||
- Would you appreciate details on clouds that you don't / won't use?
|
||||
|
||||
---
|
||||
|
||||
## Cloud Controller Manager in practice
|
||||
|
||||
- Write a configuration file
|
||||
@@ -95,7 +105,7 @@ The list includes the following providers:
|
||||
|
||||
- When using managed clusters, this is done automatically
|
||||
|
||||
- There is very little documentation to write the configuration file
|
||||
- There is very little documentation on writing the configuration file
|
||||
|
||||
(except for OpenStack)
|
||||
|
||||
@@ -113,7 +123,7 @@ The list includes the following providers:
|
||||
|
||||
- To get these addresses, the node needs to communicate with the control plane
|
||||
|
||||
- ... Which means joining the cluster
|
||||
- ...Which means joining the cluster
|
||||
|
||||
(The problem didn't occur when cloud-specific code was running in kubelet: kubelet could obtain the required information directly from the cloud provider's metadata service.)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
- error recovery (human or process has altered or corrupted data)
|
||||
|
||||
- cloning environments (for testing, validation ...)
|
||||
- cloning environments (for testing, validation...)
|
||||
|
||||
- Let's see the strategies and tools available with Kubernetes!
|
||||
|
||||
@@ -18,13 +18,13 @@
|
||||
|
||||
(it gives us replication primitives)
|
||||
|
||||
- Kubernetes helps us to clone / replicate environments
|
||||
- Kubernetes helps us to clone/replicate environments
|
||||
|
||||
(all resources can be described with manifests)
|
||||
|
||||
- Kubernetes *does not* help us with error recovery
|
||||
|
||||
- We still need to backup / snapshot our data:
|
||||
- We still need to back up/snapshot our data:
|
||||
|
||||
- with database backups (mysqldump, pgdump, etc.)
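For instance (a hedged sketch; the `app=db` label, database name, and credentials are all hypothetical):

```bash
# Find one pod of the database app and dump the database to a local file
DB_POD=$(kubectl get pods -l app=db -o jsonpath={.items[0].metadata.name})
kubectl exec $DB_POD -- pg_dump -U postgres mydb > backup-$(date +%Y-%m-%d).sql
```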
|
||||
|
||||
@@ -58,7 +58,7 @@
|
||||
|
||||
- If our deployment system isn't fully automated, it should at least be documented
|
||||
|
||||
- Litmus test: how long does it take to deploy a cluster ...
|
||||
- Litmus test: how long does it take to deploy a cluster...
|
||||
|
||||
- for a senior engineer?
|
||||
|
||||
@@ -66,7 +66,7 @@
|
||||
|
||||
- Does it require external intervention?
|
||||
|
||||
(e.g. provisioning servers, signing TLS certs ...)
|
||||
(e.g. provisioning servers, signing TLS certs...)
|
||||
|
||||
---
|
||||
|
||||
@@ -108,7 +108,7 @@
|
||||
|
||||
- For real applications: add resources (as YAML files)
|
||||
|
||||
- For applications deployed multiple times: Helm, Kustomize ...
|
||||
- For applications deployed multiple times: Helm, Kustomize...
|
||||
|
||||
(staging and production count as "multiple times")
|
||||
|
||||
|
||||
@@ -287,8 +287,8 @@
 - Download the configuration on each node, and upgrade kubelet:
   ```bash
   for N in 1 2 3; do
-    ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
-    ssh node $N sudo apt install kubelet=1.14.2-00
+    ssh test$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
+    ssh test$N sudo apt install kubelet=1.14.2-00
   done
   ```
 ]
|
||||
|
||||
@@ -188,9 +188,13 @@ class: extra-details
|
||||
|
||||
- ... But this time, the controller manager will allocate `podCIDR` subnets
|
||||
|
||||
- We will start kube-router with a DaemonSet
|
||||
(so that we don't have to manually assign subnets to individual nodes)
|
||||
|
||||
- This DaemonSet will start one instance of kube-router on each node
|
||||
- We will create a DaemonSet for kube-router
|
||||
|
||||
- We will join nodes to the cluster
|
||||
|
||||
- The DaemonSet will automatically start a kube-router pod on each node
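As a quick sanity check (a sketch; the DaemonSet name and pod label are assumed to match the kube-router manifest used here):

```bash
# The DaemonSet should report one desired/ready pod per node that has joined
kubectl -n kube-system get daemonset kube-router
kubectl -n kube-system get pods -l k8s-app=kube-router -o wide
```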
|
||||
|
||||
---
|
||||
|
||||
@@ -272,7 +276,7 @@ class: extra-details
|
||||
|
||||
- The address of the API server will be `http://A.B.C.D:8080`
|
||||
|
||||
(where `A.B.C.D` is the address of `kuberouter1`, running the control plane)
|
||||
(where `A.B.C.D` is the public address of `kuberouter1`, running the control plane)
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -300,12 +304,10 @@ Note: the DaemonSet won't create any pods (yet) since there are no nodes (yet).

 - Generate the kubeconfig file (replacing `X.X.X.X` with the address of `kuberouter1`):
   ```bash
-  kubectl --kubeconfig ~/kubeconfig config \
-          set-cluster kubenet --server http://`X.X.X.X`:8080
-  kubectl --kubeconfig ~/kubeconfig config \
-          set-context kubenet --cluster kubenet
-  kubectl --kubeconfig ~/kubeconfig config\
-          use-context kubenet
+  kubectl config set-cluster cni --server http://`X.X.X.X`:8080
+  kubectl config set-context cni --cluster cni
+  kubectl config use-context cni
+  cp ~/.kube/config ~/kubeconfig
   ```

 ]
|
||||
@@ -451,7 +453,7 @@ We should see the local pod CIDR connected to `kube-bridge`, and the other nodes

 - Or try to exec into one of the kube-router pods:
   ```bash
-  kubectl -n kube-system exec kuber-router-xxxxx bash
+  kubectl -n kube-system exec kube-router-xxxxx bash
   ```

 ]
|
||||
@@ -487,8 +489,8 @@ What does that mean?

 - First, get the container ID, with `docker ps` or like this:
   ```bash
-  CID=$(docker ps
-    --filter label=io.kubernetes.pod.namespace=kube-system
+  CID=$(docker ps -q \
+    --filter label=io.kubernetes.pod.namespace=kube-system \
     --filter label=io.kubernetes.container.name=kube-router)
   ```

|
||||
|
||||
@@ -573,7 +575,7 @@ done

 ## Starting the route reflector

-- Only do this if you are doing this on your own
+- Only do this slide if you are doing this on your own

 - There is a Compose file in the `compose/frr-route-reflector` directory
|
||||
|
||||
|
||||
slides/k8s/create-more-charts.md (new file, 367 lines)
@@ -0,0 +1,367 @@
|
||||
# Creating Helm charts
|
||||
|
||||
- We are going to create a generic Helm chart
|
||||
|
||||
- We will use that Helm chart to deploy DockerCoins
|
||||
|
||||
- Each component of DockerCoins will have its own *release*
|
||||
|
||||
- In other words, we will "install" that Helm chart multiple times
|
||||
|
||||
(one time per component of DockerCoins)
|
||||
|
||||
---
|
||||
|
||||
## Creating a generic chart
|
||||
|
||||
- Rather than starting from scratch, we will use `helm create`
|
||||
|
||||
- This will give us a basic chart that we will customize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a basic chart:
|
||||
```bash
|
||||
cd ~
|
||||
helm create helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
This creates a basic chart in the directory `helmcoins`.
|
||||
|
||||
---
|
||||
|
||||
## What's in the basic chart?
|
||||
|
||||
- The basic chart will create a Deployment and a Service
|
||||
|
||||
- Optionally, it will also include an Ingress
|
||||
|
||||
- If we don't pass any values, it will deploy the `nginx` image
|
||||
|
||||
- We can override many things in that chart
|
||||
|
||||
- Let's try to deploy DockerCoins components with that chart!
|
||||
|
||||
---
|
||||
|
||||
## Writing `values.yaml` for our components
|
||||
|
||||
- We need to write one `values.yaml` file for each component
|
||||
|
||||
(hasher, redis, rng, webui, worker)
|
||||
|
||||
- We will start with the `values.yaml` of the chart, and remove what we don't need
|
||||
|
||||
- We will create 5 files:
|
||||
|
||||
hasher.yaml, redis.yaml, rng.yaml, webui.yaml, worker.yaml
|
||||
|
||||
---
|
||||
|
||||
## Getting started
|
||||
|
||||
- For component X, we want to use the image dockercoins/X:v0.1
|
||||
|
||||
(for instance, for rng, we want to use the image dockercoins/rng:v0.1)
|
||||
|
||||
- Exception: for redis, we want to use the official image redis:latest
|
||||
|
||||
.exercise[
|
||||
|
||||
- Write minimal YAML files for the 5 components, specifying only the image
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
*Hint: our YAML files should look like this.*
|
||||
|
||||
```yaml
|
||||
### rng.yaml
|
||||
image:
|
||||
repository: dockercoins/`rng`
|
||||
tag: v0.1
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Deploying DockerCoins components
|
||||
|
||||
- For convenience, let's work in a separate namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace helmcoins
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns helmcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the chart
|
||||
|
||||
- To install a chart, we can use the following command:
|
||||
```bash
|
||||
helm install [--name `X`] <chart>
|
||||
```
|
||||
|
||||
- We can also use the following command, which is idempotent:
|
||||
```bash
|
||||
helm upgrade --install `X` chart
|
||||
```
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the 5 components of DockerCoins:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- Let's see if DockerCoins is working!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the worker:
|
||||
```bash
|
||||
stern worker
|
||||
```
|
||||
|
||||
- Look at the resources that were created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There are *many* issues to fix!
|
||||
|
||||
---
|
||||
|
||||
## Service names
|
||||
|
||||
- Our services should be named `rng`, `hasher`, etc., but they are named differently
|
||||
|
||||
- Look at the YAML template used for the services
|
||||
|
||||
- Does it look like we can override the name of the services?
|
||||
|
||||
--
|
||||
|
||||
- *Yes*, we can use `.Values.nameOverride`
|
||||
|
||||
- This means setting `nameOverride` in the values YAML file
|
||||
|
||||
---
|
||||
|
||||
## Setting service names
|
||||
|
||||
- Let's add `nameOverride: X` in each values YAML file!
|
||||
|
||||
(where X is hasher, redis, rng, etc.)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit the 5 YAML files to add `nameOverride: X`
|
||||
|
||||
- Deploy the updated Chart:
|
||||
```bash
|
||||
for COMPONENT in hasher redis rng webui worker; do
|
||||
helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
|
||||
done
|
||||
```
|
||||
(Yes, this is exactly the same command as before!)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the service names:
|
||||
```bash
|
||||
kubectl get services
|
||||
```
|
||||
Great! (We have a useless service for `worker`, but let's ignore it for now.)
|
||||
|
||||
- Check the state of the pods:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
Not so great... Some pods are *not ready.*
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting pods
|
||||
|
||||
- The easiest way to troubleshoot pods is to look at *events*
|
||||
|
||||
- We can look at all the events on the cluster (with `kubectl get events`)
|
||||
|
||||
- Or we can use `kubectl describe` on the objects that have problems
|
||||
|
||||
(`kubectl describe` will retrieve the events related to the object)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the events for the redis pods:
|
||||
```bash
|
||||
kubectl describe pod -l app.kubernetes.io/name=redis
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
What's going on?
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks
|
||||
|
||||
- The default chart defines healthchecks doing HTTP requests on port 80
|
||||
|
||||
- That won't work for redis and worker
|
||||
|
||||
(redis is not HTTP, and not on port 80; worker doesn't even listen)
|
||||
|
||||
--
|
||||
|
||||
- We could comment out the healthchecks
|
||||
|
||||
- We could also make them conditional
|
||||
|
||||
- This sounds more interesting, let's do that!
|
||||
|
||||
---
|
||||
|
||||
## Conditionals
|
||||
|
||||
- We need to enclose the healthcheck block with:
|
||||
|
||||
`{{ if CONDITION }}` at the beginning
|
||||
|
||||
`{{ end }}` at the end
|
||||
|
||||
- For the condition, we will use `.Values.healthcheck`
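Once the conditional and the `healthcheck` values are in place (next slide), one way to check the effect without deploying anything is to render the chart locally. This is a sketch, assuming the hasher/redis values files described in this section:

```bash
# With healthcheck: true (e.g. hasher.yaml), the probe block should be rendered...
helm template helmcoins/ --values=hasher.yaml | grep -c livenessProbe
# ...and with a values file that doesn't set it (e.g. redis.yaml), it shouldn't
helm template helmcoins/ --values=redis.yaml | grep -c livenessProbe
```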
|
||||
|
||||
---
|
||||
|
||||
## Updating the deployment template
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- Before the healthchecks section (it starts with `livenessProbe:`), add:
|
||||
|
||||
`{{ if .Values.healthcheck }}`
|
||||
|
||||
- After the healthchecks section (just before `resources:`), add:
|
||||
|
||||
`{{ end }}`
|
||||
|
||||
- Edit `hasher.yaml`, `rng.yaml`, `webui.yaml` to add:
|
||||
|
||||
`healthcheck: true`
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Update the deployed charts
|
||||
|
||||
- We can now apply the new templates (and the new values)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Use the same command as earlier to upgrade all five components
|
||||
|
||||
- Use `kubectl describe` to confirm that `redis` starts correctly
|
||||
|
||||
- Use `kubectl describe` to confirm that `hasher` still has healthchecks
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is it working now?
|
||||
|
||||
- If we look at the worker logs, it appears that the worker is still stuck
|
||||
|
||||
- What could be happening?
|
||||
|
||||
--
|
||||
|
||||
- The redis service is not on port 80!
|
||||
|
||||
- We need to update the port number in redis.yaml
|
||||
|
||||
- We also need to update the port number in deployment.yaml
|
||||
|
||||
(it is hard-coded to 80 there)
|
||||
|
||||
---
|
||||
|
||||
## Setting the redis port
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `redis.yaml` to add:
|
||||
```yaml
|
||||
service:
|
||||
port: 6379
|
||||
```
|
||||
|
||||
- Edit `helmcoins/templates/deployment.yaml`
|
||||
|
||||
- The line with `containerPort` should be:
|
||||
```yaml
|
||||
containerPort: {{ .Values.service.port }}
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Apply changes
|
||||
|
||||
- Re-run the for loop to execute `helm upgrade` one more time
|
||||
|
||||
- Check the worker logs
|
||||
|
||||
- This time, it should be working!
|
||||
|
||||
---
|
||||
|
||||
## Extra steps
|
||||
|
||||
- We don't need to create a service for the worker
|
||||
|
||||
- We can put the whole service block in a conditional
|
||||
|
||||
(this will require additional changes in other files referencing the service)
|
||||
|
||||
- We can set the webui to be a NodePort service
|
||||
|
||||
- We can change the number of workers with `replicaCount`
|
||||
|
||||
- And much more!
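For example (a sketch; `service.type` and `replicaCount` are values already exposed by the default `helm create` scaffold, and the release names are the ones used earlier in this section):

```bash
# Expose the web UI through a NodePort service
helm upgrade --install webui helmcoins/ --values=webui.yaml --set service.type=NodePort

# Run 10 workers instead of 1
helm upgrade --install worker helmcoins/ --values=worker.yaml --set replicaCount=10
```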
|
||||
@@ -46,7 +46,7 @@
|
||||
|
||||
(and vice versa)
|
||||
|
||||
- If I use someone's public key to encrypt / decrypt their messages,
|
||||
- If I use someone's public key to encrypt/decrypt their messages,
|
||||
<br/>
|
||||
I can be certain that I am talking to them / they are talking to me
|
||||
|
||||
@@ -58,11 +58,11 @@
|
||||
|
||||
This is what I do if I want to obtain a certificate.
|
||||
|
||||
1. Create public and private key.
|
||||
1. Create public and private keys.
|
||||
|
||||
2. Create a Certificate Signing Request (CSR).
|
||||
|
||||
(The CSR contains the identity that I claim and an expiration date.)
|
||||
(The CSR contains the identity that I claim and a public key.)
|
||||
|
||||
3. Send that CSR to the Certificate Authority (CA).
|
||||
|
||||
@@ -84,7 +84,7 @@ The CA (or anyone else) never needs to know my private key.
|
||||
|
||||
(= upload a CSR to the Kubernetes API)
|
||||
|
||||
- Then, using the Kubernetes API, we can approve / deny the request
|
||||
- Then, using the Kubernetes API, we can approve/deny the request
|
||||
|
||||
- If we approve the request, the Kubernetes API generates a certificate
|
||||
|
||||
@@ -122,7 +122,7 @@ The CA (or anyone else) never needs to know my private key.
|
||||
|
||||
- Users can then retrieve their certificate from their CSR object
|
||||
|
||||
- ... And use that certificate for subsequent interactions
|
||||
- ...And use that certificate for subsequent interactions
|
||||
|
||||
---
|
||||
|
||||
@@ -231,7 +231,7 @@ For a user named `jean.doe`, we will have:
|
||||
- Let's use OpenSSL; it's not the best one, but it's installed everywhere
|
||||
|
||||
(many people prefer cfssl, easyrsa, or other tools; that's fine too!)
|
||||
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the key and certificate signing request:
|
||||
@@ -244,7 +244,7 @@ For a user named `jean.doe`, we will have:
|
||||
|
||||
The command above generates:
|
||||
|
||||
- a 2048-bit RSA key, without DES encryption, stored in key.pem
|
||||
- a 2048-bit RSA key, without encryption, stored in key.pem
|
||||
- a CSR for the name `jean.doe` in group `devs`
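The exact command is not shown in this excerpt; as an illustrative sketch only, an invocation along these lines (flags and file names assumed) would produce that kind of key and CSR:

```bash
# 2048-bit RSA key (unencrypted) plus a CSR for CN=jean.doe, O=devs
openssl req -newkey rsa:2048 -nodes -keyout key.pem \
        -new -subj /CN=jean.doe/O=devs -out csr.pem
```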
|
||||
|
||||
---
|
||||
@@ -345,7 +345,7 @@ The command above generates:
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Retrieve the certificate from the CSR:
|
||||
- Retrieve the updated CSR object and extract the certificate:
|
||||
```bash
|
||||
kubectl get csr users:jean.doe \
|
||||
-o jsonpath={.status.certificate} \
|
||||
@@ -387,7 +387,7 @@ The command above generates:
|
||||
|
||||
## What's missing?
|
||||
|
||||
We shown, step by step, a method to issue short-lived certificates for users.
|
||||
We have just shown, step by step, a method to issue short-lived certificates for users.
|
||||
|
||||
To be usable in real environments, we would need to add:
|
||||
|
||||
@@ -417,7 +417,7 @@ To be usable in real environments, we would need to add:
|
||||
|
||||
- This provides enhanced security:
|
||||
|
||||
- the long-term credentials can use long passphrases, 2FA, HSM ...
|
||||
- the long-term credentials can use long passphrases, 2FA, HSM...
|
||||
|
||||
- the short-term credentials are more convenient to use
|
||||
|
||||
|
||||
@@ -117,7 +117,7 @@ Examples:
|
||||
|
||||
## Admission controllers
|
||||
|
||||
- When a Pod is created, it is associated to a ServiceAccount
|
||||
- When a Pod is created, it is associated with a ServiceAccount
|
||||
|
||||
(even if we did not specify one explicitly)
|
||||
|
||||
@@ -163,7 +163,7 @@ class: pic
|
||||
|
||||
- These webhooks can be *validating* or *mutating*
|
||||
|
||||
- Webhooks can be setup dynamically (without restarting the API server)
|
||||
- Webhooks can be set up dynamically (without restarting the API server)
|
||||
|
||||
- To setup a dynamic admission webhook, we create a special resource:
|
||||
|
||||
@@ -171,7 +171,7 @@ class: pic
|
||||
|
||||
- These resources are created and managed like other resources
|
||||
|
||||
(i.e. `kubectl create`, `kubectl get` ...)
|
||||
(i.e. `kubectl create`, `kubectl get`...)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -6,15 +6,15 @@
|
||||
|
||||
- Horizontal scaling = changing the number of replicas
|
||||
|
||||
(adding / removing pods)
|
||||
(adding/removing pods)
|
||||
|
||||
- Vertical scaling = changing the size of individual replicas
|
||||
|
||||
(increasing / reducing CPU and RAM per pod)
|
||||
(increasing/reducing CPU and RAM per pod)
|
||||
|
||||
- Cluster scaling = changing the size of the cluster
|
||||
|
||||
(adding / removing nodes)
|
||||
(adding/removing nodes)
|
||||
|
||||
---
|
||||
|
||||
@@ -50,9 +50,9 @@
|
||||
|
||||
- The latter actually makes a lot of sense:
|
||||
|
||||
- if a Pod doesn't have a CPU request, it might be using 10% of CPU ...
|
||||
- if a Pod doesn't have a CPU request, it might be using 10% of CPU...
|
||||
|
||||
- ... but only because there is no CPU time available!
|
||||
- ...but only because there is no CPU time available!
|
||||
|
||||
- this makes sure that we won't add pods to nodes that are already resource-starved
|
||||
|
||||
@@ -238,7 +238,7 @@ This can also be set with `--cpu-percent=`.
|
||||
|
||||
- Kubernetes doesn't implement any of these API groups
|
||||
|
||||
- Using these metrics requires to [register additional APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis)
|
||||
- Using these metrics requires [registering additional APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis)
|
||||
|
||||
- The metrics provided by metrics server are standard; everything else is custom
|
||||
|
||||
|
||||
244
slides/k8s/kubercoins.md
Normal file

@@ -0,0 +1,244 @@
|
||||
# Deploying a sample application
|
||||
|
||||
- We will connect to our new Kubernetes cluster
|
||||
|
||||
- We will deploy a sample application, "DockerCoins"
|
||||
|
||||
- That app features multiple micro-services and a web UI
|
||||
|
||||
---
|
||||
|
||||
## Connecting to our Kubernetes cluster
|
||||
|
||||
- Our cluster has multiple nodes named `node1`, `node2`, etc.
|
||||
|
||||
- We will do everything from `node1`
|
||||
|
||||
- We have SSH access to the other nodes, but won't need it
|
||||
|
||||
(but we can use it for debugging, troubleshooting, etc.)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into `node1`
|
||||
|
||||
- Check that all nodes are `Ready`:
|
||||
```bash
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cloning some repos
|
||||
|
||||
- We will need two repositories:
|
||||
|
||||
- the first one has the "DockerCoins" demo app
|
||||
|
||||
- the second one has these slides, some scripts, more manifests...
|
||||
|
||||
.exercise[
|
||||
|
||||
- Clone the kubercoins repository on `node1`:
|
||||
```bash
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
|
||||
- Clone the container.training repository as well:
|
||||
```bash
|
||||
git clone https://@@GITREPO@@
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Running the application
|
||||
|
||||
Without further ado, let's start this application!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Apply all the manifests from the kubercoins repository:
|
||||
```bash
|
||||
kubectl apply -f kubercoins/
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's this application?
|
||||
|
||||
--
|
||||
|
||||
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
|
||||
|
||||
--
|
||||
|
||||
- No, you can't buy coffee with DockerCoins
|
||||
|
||||
--
|
||||
|
||||
- How DockerCoins works:
|
||||
|
||||
- generate a few random bytes
|
||||
|
||||
- hash these bytes
|
||||
|
||||
- increment a counter (to keep track of speed)
|
||||
|
||||
- repeat forever!
|
||||
|
||||
--
|
||||
|
||||
- DockerCoins is *not* a cryptocurrency
|
||||
|
||||
(the only common points are "randomness", "hashing", and "coins" in the name)
|
||||
|
||||
---
|
||||
|
||||
## DockerCoins in the microservices era
|
||||
|
||||
- DockerCoins is made of 5 services:
|
||||
|
||||
- `rng` = web service generating random bytes
|
||||
|
||||
- `hasher` = web service computing hash of POSTed data
|
||||
|
||||
- `worker` = background process calling `rng` and `hasher`
|
||||
|
||||
- `webui` = web interface to watch progress
|
||||
|
||||
- `redis` = data store (holds a counter updated by `worker`)
|
||||
|
||||
- These 5 services are visible in the application's Compose file,
|
||||
[docker-compose.yml](
|
||||
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)
|
||||
|
||||
---
|
||||
|
||||
## How DockerCoins works
|
||||
|
||||
- `worker` invokes web service `rng` to generate random bytes
|
||||
|
||||
- `worker` invokes web service `hasher` to hash these bytes
|
||||
|
||||
- `worker` does this in an infinite loop
|
||||
|
||||
- every second, `worker` updates `redis` to indicate how many loops were done
|
||||
|
||||
- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser
|
||||
|
||||
*(See diagram on next slide!)*
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Service discovery in container-land
|
||||
|
||||
How does each service find out the address of the other ones?
|
||||
|
||||
--
|
||||
|
||||
- We do not hard-code IP addresses in the code
|
||||
|
||||
- We do not hard-code FQDNs in the code, either
|
||||
|
||||
- We just connect to a service name, and container-magic does the rest
|
||||
|
||||
(And by container-magic, we mean "a crafty, dynamic, embedded DNS server")
|
||||
|
||||
---
|
||||
|
||||
## Example in `worker/worker.py`
|
||||
|
||||
```python
|
||||
redis = Redis("`redis`")
|
||||
|
||||
|
||||
def get_random_bytes():
|
||||
r = requests.get("http://`rng`/32")
|
||||
return r.content
|
||||
|
||||
|
||||
def hash_bytes(data):
|
||||
r = requests.post("http://`hasher`/",
|
||||
data=data,
|
||||
headers={"Content-Type": "application/octet-stream"})
|
||||
```
|
||||
|
||||
(Full source code available [here](
|
||||
https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17
|
||||
))
|
||||
|
||||
---
|
||||
|
||||
## Show me the code!
|
||||
|
||||
- You can check the GitHub repository with all the materials of this workshop:
|
||||
<br/>https://@@GITREPO@@
|
||||
|
||||
- The application is in the [dockercoins](
|
||||
https://@@GITREPO@@/tree/master/dockercoins)
|
||||
subdirectory
|
||||
|
||||
- The Compose file ([docker-compose.yml](
|
||||
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml))
|
||||
lists all 5 services
|
||||
|
||||
- `redis` is using an official image from the Docker Hub
|
||||
|
||||
- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile
|
||||
|
||||
- Each service's Dockerfile and source code is in its own directory
|
||||
|
||||
(`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory,
|
||||
`rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/)
|
||||
directory, etc.)
|
||||
|
||||
---
|
||||
|
||||
## Our application at work
|
||||
|
||||
- We can check the logs of our application's pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the logs of the various components:
|
||||
```bash
|
||||
kubectl logs deploy/worker
|
||||
kubectl logs deploy/hasher
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Connecting to the web UI
|
||||
|
||||
- "Logs are exciting and fun!" (No-one, ever)
|
||||
|
||||
- The `webui` container exposes a web dashboard; let's view it
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the NodePort allocated to the web UI:
|
||||
```bash
|
||||
kubectl get svc webui
|
||||
```
|
||||
|
||||
- Open that in a web browser
|
||||
|
||||
]
|
||||
|
||||
A drawing area should show up, and after a few seconds, a blue
|
||||
graph will appear.
|
||||
@@ -48,7 +48,7 @@
|
||||
|
||||
- Acknowledge that a lot of tasks are outsourced
|
||||
|
||||
(e.g. if we add "buy / rack / provision machines" in that list)
|
||||
(e.g. if we add "buy/rack/provision machines" in that list)
|
||||
|
||||
---
|
||||
|
||||
@@ -122,7 +122,7 @@
|
||||
|
||||
(YAML, Helm charts, Kustomize ...)
|
||||
|
||||
- Team "run" adjusts some parameters and monitors the application
|
||||
- Team "run" adjusts some parameters and monitors the application
|
||||
|
||||
✔️ parity between dev and prod environments
|
||||
|
||||
@@ -150,7 +150,7 @@
|
||||
|
||||
- do we reward on-call duty without encouraging hero syndrome?
|
||||
|
||||
- do we give resources (time, money) to people to learn?
|
||||
- do we give people resources (time, money) to learn?
|
||||
|
||||
---
|
||||
|
||||
@@ -183,9 +183,9 @@ are a few tools that can help us.*
|
||||
|
||||
- If cloud: public vs. private
|
||||
|
||||
- Which vendor / distribution to pick?
|
||||
- Which vendor/distribution to pick?
|
||||
|
||||
- Which versions / features to enable?
|
||||
- Which versions/features to enable?
|
||||
|
||||
---
|
||||
|
||||
@@ -205,6 +205,6 @@ are a few tools that can help us.*
|
||||
|
||||
- Transfer knowledge
|
||||
|
||||
(make sure everyone is on the same page / same level)
|
||||
(make sure everyone is on the same page/level)
|
||||
|
||||
- Iterate!
|
||||
|
||||
@@ -73,12 +73,12 @@ and a few roles and role bindings (to give fluentd the required permissions).
|
||||
|
||||
- Fluentd runs on each node (thanks to a daemon set)
|
||||
|
||||
- It binds-mounts `/var/log/containers` from the host (to access these files)
|
||||
- It bind-mounts `/var/log/containers` from the host (to access these files)
|
||||
|
||||
- It continuously scans this directory for new files; reads them; parses them
|
||||
|
||||
- Each log line becomes a JSON object, fully annotated with extra information:
|
||||
<br/>container id, pod name, Kubernetes labels ...
|
||||
<br/>container id, pod name, Kubernetes labels...
|
||||
|
||||
- These JSON objects are stored in ElasticSearch
|
||||
|
||||
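We can peek at the raw material on any node (a sketch; the exact file names depend on which pods run there):

```bash
# The log files that fluentd bind-mounts and tails on each node
ls /var/log/containers/
# Each line in these files is parsed and annotated by fluentd
sudo tail -n 1 /var/log/containers/*.log
```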
|
||||
@@ -1,6 +1,6 @@
|
||||
# Accessing logs from the CLI
|
||||
|
||||
- The `kubectl logs` commands has limitations:
|
||||
- The `kubectl logs` command has limitations:
|
||||
|
||||
- it cannot stream logs from multiple pods at a time
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
## Doing it manually
|
||||
|
||||
- We *could* (if we were so inclined), write a program or script that would:
|
||||
- We *could* (if we were so inclined) write a program or script that would:
|
||||
|
||||
- take a selector as an argument
|
||||
|
||||
@@ -72,11 +72,11 @@ Exactly what we need!
|
||||
|
||||
## Using Stern
|
||||
|
||||
- There are two ways to specify the pods for which we want to see the logs:
|
||||
- There are two ways to specify the pods whose logs we want to see:
|
||||
|
||||
- `-l` followed by a selector expression (like with many `kubectl` commands)
|
||||
|
||||
- with a "pod query", i.e. a regex used to match pod names
|
||||
- with a "pod query," i.e. a regex used to match pod names
|
||||
|
||||
- These two ways can be combined if necessary
|
||||
|
||||
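For example (a sketch, assuming pods labeled `app=worker` and named `worker-...`):

```bash
stern -l app=worker          # select pods by label
stern worker                 # select pods by name regex ("pod query")
stern -l app=worker worker   # combine both filters
```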
|
||||
@@ -96,7 +96,7 @@ class: extra-details
|
||||
|
||||
- We need to generate a `kubeconfig` file for kubelet
|
||||
|
||||
- This time, we need to put the IP address of `kubenet1`
|
||||
- This time, we need to put the public IP address of `kubenet1`
|
||||
|
||||
(instead of `localhost` or `127.0.0.1`)
|
||||
|
||||
@@ -104,12 +104,10 @@ class: extra-details
|
||||
|
||||
- Generate the `kubeconfig` file:
|
||||
```bash
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-cluster kubenet --server http://`X.X.X.X`:8080
|
||||
kubectl --kubeconfig ~/kubeconfig config \
|
||||
set-context kubenet --cluster kubenet
|
||||
kubectl --kubeconfig ~/kubeconfig config\
|
||||
use-context kubenet
|
||||
kubectl config set-cluster kubenet --server http://`X.X.X.X`:8080
|
||||
kubectl config set-context kubenet --cluster kubenet
|
||||
kubectl config use-context kubenet
|
||||
cp ~/.kube/config ~/kubeconfig
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
@@ -307,7 +307,7 @@ This policy selects all pods in the current namespace.
|
||||
|
||||
It allows traffic only from pods in the current namespace.
|
||||
|
||||
(An empty `podSelector` means "all pods".)
|
||||
(An empty `podSelector` means "all pods.")
|
||||
|
||||
```yaml
|
||||
kind: NetworkPolicy
|
||||
@@ -329,7 +329,7 @@ This policy selects all pods with label `app=webui`.
|
||||
|
||||
It allows traffic from any source.
|
||||
|
||||
(An empty `from` fields means "all sources".)
|
||||
(An empty `from` field means "all sources.")
|
||||
|
||||
```yaml
|
||||
kind: NetworkPolicy
|
||||
@@ -412,7 +412,7 @@ troubleshoot easily, without having to poke holes in our firewall.
|
||||
|
||||
- If we block access to the control plane, we might disrupt legitimate code
|
||||
|
||||
- ... Without necessarily improving security
|
||||
- ...Without necessarily improving security
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,377 +0,0 @@
|
||||
# OpenID Connect
|
||||
|
||||
- The Kubernetes API server can perform authentication with OpenID Connect
|
||||
|
||||
- This requires an *OpenID provider*
|
||||
|
||||
(external authorization server using the OAuth 2.0 protocol)
|
||||
|
||||
- We can use a third-party provider (e.g. Google) or run our own (e.g. Dex)
|
||||
|
||||
- We are going to give an overview of the protocol
|
||||
|
||||
- We will show it in action (in a simplified scenario)
|
||||
|
||||
---
|
||||
|
||||
## Workflow overview
|
||||
|
||||
- We want to access our resources (a Kubernetes cluster)
|
||||
|
||||
- We authenticate with the OpenID provider
|
||||
|
||||
- we can do this directly (e.g. by going to https://accounts.google.com)
|
||||
|
||||
- or maybe a kubectl plugin can open a browser page on our behalf
|
||||
|
||||
- After authenticating us, the OpenID provider gives us:
|
||||
|
||||
- an *id token* (a short-lived signed JSON Web Token, see next slide)
|
||||
|
||||
- a *refresh token* (to renew the previous one when needed)
|
||||
|
||||
- We can now issue requests to the Kubernetes API with the *id token*
|
||||
|
||||
- The API server will verify that token's content to authenticate us
|
||||
|
||||
---
|
||||
|
||||
## JSON Web Tokens
|
||||
|
||||
- A JSON Web Token (JWT) has three parts:
|
||||
|
||||
- a header specifying algorithms and token type
|
||||
|
||||
- a payload (indicating who issued the token, for whom, which purposes...)
|
||||
|
||||
- a signature generated by the issuer (the issuer = the OpenID provider)
|
||||
|
||||
- Anyone can verify a JWT without contacting the issuer
|
||||
|
||||
(except to obtain the issuer's public key)
|
||||
|
||||
- Pro tip: we can inspect a JWT with https://jwt.io/
|
||||
|
||||
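From the command line, we can also decode the payload ourselves (a sketch; assumes the token is in `$TOKEN` and that `jq` is installed):

```bash
# The payload is the second dot-separated field of the JWT.
# JWTs use unpadded base64url, so translate characters and re-pad first.
PAYLOAD=$(echo $TOKEN | cut -d. -f2 | tr '_-' '/+')
while [ $(( ${#PAYLOAD} % 4 )) -ne 0 ]; do PAYLOAD="$PAYLOAD="; done
echo $PAYLOAD | base64 -d | jq .
```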
---
|
||||
|
||||
## How the Kubernetes API uses JWT
|
||||
|
||||
- Server side
|
||||
|
||||
- enable OIDC authentication
|
||||
|
||||
- indicate which issuer (provider) should be allowed
|
||||
|
||||
- indicate which audience (or "client id") should be allowed
|
||||
|
||||
- if necessary, map or prefix user and group names
|
||||
|
||||
- Client side
|
||||
|
||||
- obtain JWT as described earlier
|
||||
|
||||
- pass JWT as authentication token
|
||||
|
||||
- renew JWT when needed (using the refresh token)
|
||||
|
||||
---
|
||||
|
||||
## Demo time!
|
||||
|
||||
- We will use [Google Accounts](https://accounts.google.com) as our OpenID provider
|
||||
|
||||
- We will use the [Google OAuth Playground](https://developers.google.com/oauthplayground) as the "audience" or "client id"
|
||||
|
||||
- We will obtain a JWT through Google Accounts and the OAuth Playground
|
||||
|
||||
- We will enable OIDC in the Kubernetes API server
|
||||
|
||||
- We will use the JWT to authenticate
|
||||
|
||||
.footnote[If you can't or won't use a Google Account, you can try to adapt to another provider.]
|
||||
|
||||
---
|
||||
|
||||
## Checking the API server logs
|
||||
|
||||
- The API server logs will be particularly useful in this section
|
||||
|
||||
(they will indicate e.g. why a specific token is rejected)
|
||||
|
||||
- Let's keep an eye on the API server output!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tail the logs of the API server:
|
||||
```bash
|
||||
kubectl logs kube-apiserver-node1 --follow --namespace=kube-system
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authenticate with the OpenID provider
|
||||
|
||||
- We will use the Google OAuth Playground for convenience
|
||||
|
||||
- In a real scenario, we would need our own OAuth client instead of the playground
|
||||
|
||||
(even if we were still using Google as the OpenID provider)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Open the Google OAuth Playground:
|
||||
```
|
||||
https://developers.google.com/oauthplayground/
|
||||
```
|
||||
|
||||
- Enter our own custom scope in the text field:
|
||||
```
|
||||
https://www.googleapis.com/auth/userinfo.email
|
||||
```
|
||||
|
||||
- Click on "Authorize APIs" and allow the playground to access our email address
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Obtain our JSON Web Token
|
||||
|
||||
- The previous step gave us an "authorization code"
|
||||
|
||||
- We will use it to obtain tokens
|
||||
|
||||
.exercise[
|
||||
|
||||
- Click on "Exchange authorization code for tokens"
|
||||
|
||||
]
|
||||
|
||||
- The JWT is the very long `id_token` that shows up on the right hand side
|
||||
|
||||
(it is a base64-encoded JSON object, and should therefore start with `eyJ`)
|
||||
|
||||
---
|
||||
|
||||
## Using our JSON Web Token
|
||||
|
||||
- We need to create a context (in kubeconfig) for our token
|
||||
|
||||
(if we just add the token or use `kubectl --token`, our certificate will still be used)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new authentication section in kubeconfig:
|
||||
```bash
|
||||
kubectl config set-credentials myjwt --token=eyJ...
|
||||
```
|
||||
|
||||
- Try to use it:
|
||||
```bash
|
||||
kubectl --user=myjwt get nodes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should get an `Unauthorized` response, since we haven't enabled OpenID Connect in the API server yet. We should also see `invalid bearer token` in the API server log output.
|
||||
|
||||
---
|
||||
|
||||
## Enabling OpenID Connect
|
||||
|
||||
- We need to add a few flags to the API server configuration
|
||||
|
||||
- These two are mandatory:
|
||||
|
||||
`--oidc-issuer-url` → URL of the OpenID provider
|
||||
|
||||
`--oidc-client-id` → app requesting the authentication
|
||||
<br/>(in our case, that's the ID for the Google OAuth Playground)
|
||||
|
||||
- This one is optional:
|
||||
|
||||
`--oidc-username-claim` → which field should be used as user name
|
||||
<br/>(we will use the user's email address instead of an opaque ID)
|
||||
|
||||
- See the [API server documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server
|
||||
) for more details about all available flags
|
||||
|
||||
---
|
||||
|
||||
## Updating the API server configuration
|
||||
|
||||
- The instructions below will work for clusters deployed with kubeadm
|
||||
|
||||
(or where the control plane is deployed in static pods)
|
||||
|
||||
- If your cluster is different, you will need to adapt them
|
||||
|
||||
.exercise[
|
||||
|
||||
- Edit `/etc/kubernetes/manifests/kube-apiserver.yaml`
|
||||
|
||||
- Add the following lines to the list of command-line flags:
|
||||
```yaml
|
||||
- --oidc-issuer-url=https://accounts.google.com
|
||||
- --oidc-client-id=407408718192.apps.googleusercontent.com
|
||||
- --oidc-username-claim=email
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Restarting the API server
|
||||
|
||||
- The kubelet monitors the files in `/etc/kubernetes/manifests`
|
||||
|
||||
- When we save the pod manifest, kubelet will restart the corresponding pod
|
||||
|
||||
(using the updated command line flags)
|
||||
|
||||
.exercise[
|
||||
|
||||
- After making the changes described on the previous slide, save the file
|
||||
|
||||
- Issue a simple command (like `kubectl version`) until the API server is back up
|
||||
|
||||
(it might take between a few seconds and one minute for the API server to restart)
|
||||
|
||||
- Restart the `kubectl logs` command to view the logs of the API server
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using our JSON Web Token
|
||||
|
||||
- Now that the API server is set up to recognize our token, try again!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try an API command with our token:
|
||||
```bash
|
||||
kubectl --user=myjwt get nodes
|
||||
kubectl --user=myjwt get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see a message like:
|
||||
```
|
||||
Error from server (Forbidden): nodes is forbidden: User "jean.doe@gmail.com"
|
||||
cannot list resource "nodes" in API group "" at the cluster scope
|
||||
```
|
||||
|
||||
→ We were successfully *authenticated*, but not *authorized*.
|
||||
|
||||
---
|
||||
|
||||
## Authorizing our user
|
||||
|
||||
- As an extra step, let's grant read access to our user
|
||||
|
||||
- We will use the pre-defined ClusterRole `view`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a ClusterRoleBinding allowing us read access to the cluster:
|
||||
```bash
|
||||
kubectl create clusterrolebinding i-can-view \
|
||||
--user=`jean.doe@gmail.com` --clusterrole=view
|
||||
```
|
||||
|
||||
- Confirm that we can now list pods with our token:
|
||||
```bash
|
||||
kubectl --user=myjwt get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## From demo to production
|
||||
|
||||
.warning[This was a very simplified demo! In a real deployment...]
|
||||
|
||||
- We wouldn't use the Google OAuth Playground
|
||||
|
||||
- We *probably* wouldn't even use Google at all
|
||||
|
||||
(it doesn't seem to provide a way to include groups!)
|
||||
|
||||
- Some popular alternatives:
|
||||
|
||||
- [Dex](https://github.com/dexidp/dex),
|
||||
[Keycloak](https://www.keycloak.org/)
|
||||
(self-hosted)
|
||||
|
||||
- [Okta](https://developer.okta.com/docs/how-to/creating-token-with-groups-claim/#step-five-decode-the-jwt-to-verify)
|
||||
(SaaS)
|
||||
|
||||
- We would use a helper (like the [kubelogin](https://github.com/int128/kubelogin) plugin) to automatically obtain tokens
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Service Account tokens
|
||||
|
||||
- The tokens used by Service Accounts are JWT tokens as well
|
||||
|
||||
- They are signed and verified using a special service account key pair
|
||||
|
||||
.exercise[
|
||||
|
||||
- Extract the token of a service account in the current namespace:
|
||||
```bash
|
||||
kubectl get secrets -o jsonpath={..token} | base64 -d
|
||||
```
|
||||
|
||||
- Copy-paste the token to a verification service like https://jwt.io
|
||||
|
||||
- Notice that it says "Invalid Signature"
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Verifying Service Account tokens
|
||||
|
||||
- JSON Web Tokens embed the URL of the "issuer" (=OpenID provider)
|
||||
|
||||
- The issuer provides its public key through a well-known discovery endpoint
|
||||
|
||||
(similar to https://accounts.google.com/.well-known/openid-configuration)
|
||||
|
||||
- There is no such endpoint for the Service Account key pair
|
||||
|
||||
- But we can provide the public key ourselves for verification
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Verifying a Service Account token
|
||||
|
||||
- On clusters provisioned with kubeadm, the Service Account key pair is:
|
||||
|
||||
`/etc/kubernetes/pki/sa.key` (used by the controller manager to generate tokens)
|
||||
|
||||
`/etc/kubernetes/pki/sa.pub` (used by the API server to validate the same tokens)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the public key used to sign Service Account tokens:
|
||||
```bash
|
||||
sudo cat /etc/kubernetes/pki/sa.pub
|
||||
```
|
||||
|
||||
- Copy-paste the key in the "verify signature" area on https://jwt.io
|
||||
|
||||
- It should now say "Signature Verified"
|
||||
|
||||
]
|
||||
@@ -1,4 +1,4 @@
|
||||
## Operator design (extra material)
|
||||
## What does it take to write an operator?
|
||||
|
||||
- Writing a quick-and-dirty operator, or a POC/MVP, is easy
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Owners and dependents (extra material)
|
||||
# Owners and dependents
|
||||
|
||||
- Some objects are created by other objects
|
||||
|
||||
|
||||
@@ -8,12 +8,18 @@
|
||||
|
||||
- Then we will explain how to avoid this with PodSecurityPolicies
|
||||
|
||||
- We will illustrate this by creating a non-privileged user limited to a namespace
|
||||
- We will enable PodSecurityPolicies on our cluster
|
||||
|
||||
- We will create a couple of policies (restricted and permissive)
|
||||
|
||||
- Finally we will see how to use them to improve security on our cluster
|
||||
|
||||
---
|
||||
|
||||
## Setting up a namespace
|
||||
|
||||
- For simplicity, let's work in a separate namespace
|
||||
|
||||
- Let's create a new namespace called "green"
|
||||
|
||||
.exercise[
|
||||
@@ -32,168 +38,9 @@
|
||||
|
||||
---
|
||||
|
||||
## Using limited credentials
|
||||
|
||||
- When a namespace is created, a `default` ServiceAccount is added
|
||||
|
||||
- By default, this ServiceAccount doesn't have any access rights
|
||||
|
||||
- We will use this ServiceAccount as our non-privileged user
|
||||
|
||||
- We will obtain this ServiceAccount's token and add it to a context
|
||||
|
||||
- Then we will give basic access rights to this ServiceAccount
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the ServiceAccount's token
|
||||
|
||||
- The token is stored in a Secret
|
||||
|
||||
- The Secret is listed in the ServiceAccount
|
||||
|
||||
.exercise[
|
||||
|
||||
- Obtain the name of the Secret from the ServiceAccount:
|
||||
```bash
|
||||
SECRET=$(kubectl get sa default -o jsonpath={.secrets[0].name})
|
||||
```
|
||||
|
||||
- Extract the token from the Secret object:
|
||||
```bash
|
||||
TOKEN=$(kubectl get secrets $SECRET -o jsonpath={.data.token} \
|
||||
| base64 -d)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Inspecting a Kubernetes token
|
||||
|
||||
- Kubernetes tokens are JSON Web Tokens
|
||||
|
||||
(as defined by [RFC 7519](https://tools.ietf.org/html/rfc7519))
|
||||
|
||||
- We can view their content (and even verify them) easily
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the token that we obtained:
|
||||
```bash
|
||||
echo $TOKEN
|
||||
```
|
||||
|
||||
- Copy-paste the token in the verification form on https://jwt.io
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authenticating using the ServiceAccount token
|
||||
|
||||
- Let's create a new *context* accessing our cluster with that token
|
||||
|
||||
.exercise[
|
||||
|
||||
- First, add the token credentials to our kubeconfig file:
|
||||
```bash
|
||||
kubectl config set-credentials green --token=$TOKEN
|
||||
```
|
||||
|
||||
- Then, create a new context using these credentials:
|
||||
```bash
|
||||
kubectl config set-context green --user=green --cluster=kubernetes
|
||||
```
|
||||
|
||||
- Check the results:
|
||||
```bash
|
||||
kubectl config get-contexts
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the new context
|
||||
|
||||
- Normally, this context doesn't let us access *anything* (yet)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Change to the new context with one of these two commands:
|
||||
```bash
|
||||
kctx green
|
||||
kubectl config use-context green
|
||||
```
|
||||
|
||||
- Also change to the green namespace in that context:
|
||||
```bash
|
||||
kns green
|
||||
```
|
||||
|
||||
- Confirm that we don't have access to anything:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Giving basic access rights
|
||||
|
||||
- Let's bind the ClusterRole `edit` to our ServiceAccount
|
||||
|
||||
- To allow access only to the namespace, we use a RoleBinding
|
||||
|
||||
(instead of a ClusterRoleBinding, which would give global access)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `cluster-admin`:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Create the Role Binding:
|
||||
```bash
|
||||
kubectl create rolebinding green --clusterrole=edit --serviceaccount=green:default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verifying access rights
|
||||
|
||||
- Let's switch back to the `green` context and check that we have rights
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `green`:
|
||||
```bash
|
||||
kctx green
|
||||
```
|
||||
|
||||
- Check our permissions:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see an empty list.
|
||||
|
||||
(Better than a series of permission errors!)
|
||||
|
||||
---
|
||||
|
||||
## Creating a basic Deployment
|
||||
|
||||
- Just to demonstrate that everything works correctly, deploy NGINX
|
||||
- Just to check that everything works correctly, deploy NGINX
|
||||
|
||||
.exercise[
|
||||
|
||||
@@ -202,7 +49,7 @@ We should see an empty list.
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
- Confirm that the Deployment, ReplicaSet, and Pod exist, and Pod is running:
|
||||
- Confirm that the Deployment, ReplicaSet, and Pod exist, and that the Pod is running:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
@@ -316,7 +163,7 @@ We should see an empty list.
|
||||
- If we create a Pod directly, it can use a PSP to which *we* have access
|
||||
|
||||
- If the Pod is created by e.g. a ReplicaSet or DaemonSet, it's different:
|
||||
|
||||
|
||||
- the ReplicaSet / DaemonSet controllers don't have access to *our* policies
|
||||
|
||||
- therefore, we need to give access to the PSP to the Pod's ServiceAccount
|
||||
@@ -331,7 +178,7 @@ We should see an empty list.
|
||||
|
||||
- Then we will create a couple of PodSecurityPolicies
|
||||
|
||||
- ... And associated ClusterRoles (giving `use` access to the policies)
|
||||
- ...And associated ClusterRoles (giving `use` access to the policies)
|
||||
|
||||
- Then we will create RoleBindings to grant these roles to ServiceAccounts
|
||||
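Such a ClusterRole can be created with a one-liner like this (a sketch; the policy and role names are assumptions for illustration):

```bash
# Allow "use" of a PodSecurityPolicy named "restricted"
kubectl create clusterrole psp:restricted \
        --verb=use \
        --resource=podsecuritypolicies \
        --resource-name=restricted
```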
|
||||
@@ -365,7 +212,7 @@ We should see an empty list.
|
||||
|
||||
- Have a look at the static pods:
|
||||
```bash
|
||||
ls -l /etc/kubernetes/manifest
|
||||
ls -l /etc/kubernetes/manifests
|
||||
```
|
||||
|
||||
- Edit the one corresponding to the API server:
|
||||
@@ -389,7 +236,7 @@ We should see an empty list.
|
||||
|
||||
- Add `PodSecurityPolicy`
|
||||
|
||||
(It should read `--enable-admission-plugins=NodeRestriction,PodSecurityPolicy`)
|
||||
It should read: `--enable-admission-plugins=NodeRestriction,PodSecurityPolicy`
|
||||
|
||||
- Save, quit
|
||||
|
||||
@@ -474,12 +321,65 @@ We can get hints at what's happening by looking at the ReplicaSet and Events.
|
||||
|
||||
---
|
||||
|
||||
## Check that we can create Pods again
|
||||
|
||||
- We haven't bound the policy to any user yet
|
||||
|
||||
- But `cluster-admin` can implicitly `use` all policies
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that we can now create a Pod directly:
|
||||
```bash
|
||||
kubectl run testpsp3 --image=nginx --restart=Never
|
||||
```
|
||||
|
||||
- Create a Deployment as well:
|
||||
```bash
|
||||
kubectl run testpsp4 --image=nginx
|
||||
```
|
||||
|
||||
- Confirm that the Deployment is *not* creating any Pods:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's going on?
|
||||
|
||||
- We can create Pods directly (thanks to our root-like permissions)
|
||||
|
||||
- The Pods corresponding to a Deployment are created by the ReplicaSet controller
|
||||
|
||||
- The ReplicaSet controller does *not* have root-like permissions
|
||||
|
||||
- We need to either:
|
||||
|
||||
- grant permissions to the ReplicaSet controller
|
||||
|
||||
*or*
|
||||
|
||||
- grant permissions to our Pods' ServiceAccount
|
||||
|
||||
- The first option would allow *anyone* to create pods
|
||||
|
||||
- The second option will allow us to scope the permissions better
|
||||
|
||||
---
|
||||
|
||||
## Binding the restricted policy
|
||||
|
||||
- Let's bind the role `psp:restricted` to ServiceAccount `green:default`
|
||||
|
||||
(aka the default ServiceAccount in the green Namespace)
|
||||
|
||||
- This will allow Pod creation in the green Namespace
|
||||
|
||||
(because these Pods will be using that ServiceAccount automatically)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the following RoleBinding:
|
||||
@@ -495,18 +395,17 @@ We can get hints at what's happening by looking at the ReplicaSet and Events.
|
||||
|
||||
## Trying it out
|
||||
|
||||
- Let's switch to the `green` context, and try to create resources
|
||||
- The Deployments that we created earlier will *eventually* recover
|
||||
|
||||
(the ReplicaSet controller will retry to create Pods once in a while)
|
||||
|
||||
- If we create a new Deployment now, it should work immediately
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch to the `green` context:
|
||||
```bash
|
||||
kctx green
|
||||
```
|
||||
|
||||
- Create a simple Deployment:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
kubectl create deployment testpsp5 --image=nginx
|
||||
```
|
||||
|
||||
- Look at the Pods that have been created:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Highly available Persistent Volumes
|
||||
# Highly available Persistent Volumes (extra material)
|
||||
|
||||
- How can we achieve true durability?
|
||||
|
||||
|
||||
@@ -1,8 +1,52 @@
|
||||
# Additional lab environments
|
||||
# Pre-requirements
|
||||
|
||||
- We will now use new sets of VMs
|
||||
- Kubernetes concepts
|
||||
|
||||
- Look out for new individual card with connection information!
|
||||
(pods, deployments, services, labels, selectors)
|
||||
|
||||
- Hands-on experience working with containers
|
||||
|
||||
(building images, running them; doesn't matter how exactly)
|
||||
|
||||
- Familiar with the UNIX command-line
|
||||
|
||||
(navigating directories, editing files, using `kubectl`)
|
||||
|
||||
---
|
||||
|
||||
## Labs and exercises
|
||||
|
||||
- We are going to build and break multiple clusters
|
||||
|
||||
- Everyone will get their own private environment(s)
|
||||
|
||||
- You are invited to reproduce all the demos (but you don't have to)
|
||||
|
||||
- All hands-on sections are clearly identified, like the gray rectangle below
|
||||
|
||||
.exercise[
|
||||
|
||||
- This is the stuff you're supposed to do!
|
||||
|
||||
- Go to @@SLIDES@@ to view these slides
|
||||
|
||||
- Join the chat room: @@CHAT@@
|
||||
|
||||
<!-- ```open @@SLIDES@@``` -->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Private environments
|
||||
|
||||
- Each person gets their own private set of VMs
|
||||
|
||||
- Each person should have a printed card with connection information
|
||||
|
||||
- We will connect to these VMs with SSH
|
||||
|
||||
(if you don't have an SSH client, install one **now!**)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
- We don't endorse Prometheus more or less than any other system
|
||||
|
||||
- It's relatively well integrated within the Cloud Native ecosystem
|
||||
- It's relatively well integrated within the cloud-native ecosystem
|
||||
|
||||
- It can be self-hosted (this is useful for tutorials like this)
|
||||
|
||||
@@ -182,7 +182,7 @@ We need to:
|
||||
|
||||
- Run the *node exporter* on each node (with a Daemon Set)
|
||||
|
||||
- Setup a Service Account so that Prometheus can query the Kubernetes API
|
||||
- Set up a Service Account so that Prometheus can query the Kubernetes API
|
||||
|
||||
- Configure the Prometheus server
|
||||
|
||||
@@ -250,7 +250,7 @@ class: extra-details
|
||||
|
||||
## Explaining all the Helm flags
|
||||
|
||||
- `helm upgrade prometheus` → upgrade release "prometheus" to the latest version ...
|
||||
- `helm upgrade prometheus` → upgrade release "prometheus" to the latest version...
|
||||
|
||||
(a "release" is a unique name given to an app deployed with Helm)
|
||||
|
||||
@@ -288,7 +288,7 @@ class: extra-details
|
||||
|
||||
## Querying some metrics
|
||||
|
||||
- This is easy ... if you are familiar with PromQL
|
||||
- This is easy... if you are familiar with PromQL
|
||||
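As an appetizer, here is what querying the node exporter's CPU metric could look like from the command line (a sketch; the `prometheus-server` service name and the metric name assume a standard Helm chart and a recent node exporter):

```bash
curl -sG http://prometheus-server/api/v1/query \
     --data-urlencode \
     'query=sum by (instance) (irate(node_cpu_seconds_total{mode!="idle"}[5m]))'
```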
|
||||
.exercise[
|
||||
|
||||
@@ -433,9 +433,9 @@ class: extra-details
|
||||
|
||||
- I/O activity (disk, network), per operation or volume
|
||||
|
||||
- Physical/hardware (when applicable): temperature, fan speed ...
|
||||
- Physical/hardware (when applicable): temperature, fan speed...
|
||||
|
||||
- ... and much more!
|
||||
- ...and much more!
|
||||
|
||||
---
|
||||
|
||||
@@ -448,7 +448,7 @@ class: extra-details
|
||||
- RAM breakdown will be different
|
||||
|
||||
- active vs inactive memory
|
||||
- some memory is *shared* between containers, and accounted specially
|
||||
- some memory is *shared* between containers, and specially accounted for
|
||||
|
||||
- I/O activity is also harder to track
|
||||
|
||||
@@ -467,11 +467,11 @@ class: extra-details
|
||||
|
||||
- Arbitrary metrics related to your application and business
|
||||
|
||||
- System performance: request latency, error rate ...
|
||||
- System performance: request latency, error rate...
|
||||
|
||||
- Volume information: number of rows in database, message queue size ...
|
||||
- Volume information: number of rows in database, message queue size...
|
||||
|
||||
- Business data: inventory, items sold, revenue ...
|
||||
- Business data: inventory, items sold, revenue...
|
||||
|
||||
---
|
||||
|
||||
@@ -541,8 +541,8 @@ class: extra-details
|
||||
|
||||
- That person can set up queries and dashboards for the rest of the team
|
||||
|
||||
- It's a little bit likeknowing how to optimize SQL queries, Dockerfiles ...
|
||||
- It's a little bit like knowing how to optimize SQL queries, Dockerfiles...
|
||||
|
||||
Don't panic if you don't know these tools!
|
||||
|
||||
... But make sure at least one person in your team is on it 💯
|
||||
...But make sure at least one person in your team is on it 💯
|
||||
|
||||
@@ -86,17 +86,17 @@ Each pod is assigned a QoS class (visible in `status.qosClass`).
|
||||
|
||||
- as long as the container uses less than the limit, it won't be affected
|
||||
|
||||
- if all containers in a pod have *(limits=requests)*, QoS is "Guaranteed"
|
||||
- if all containers in a pod have *(limits=requests)*, QoS is considered "Guaranteed"
|
||||
|
||||
- If requests < limits:
|
||||
|
||||
- as long as the container uses less than the request, it won't be affected
|
||||
|
||||
- otherwise, it might be killed / evicted if the node gets overloaded
|
||||
- otherwise, it might be killed/evicted if the node gets overloaded
|
||||
|
||||
- if at least one container has *(requests<limits)*, QoS is "Burstable"
|
||||
- if at least one container has *(requests<limits)*, QoS is considered "Burstable"
|
||||
|
||||
- If a pod doesn't have any request nor limit, QoS is "BestEffort"
|
||||
- If a pod doesn't have any request nor limit, QoS is considered "BestEffort"
|
||||
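We can list the QoS class assigned to each pod with something like this (a sketch; it only uses standard fields, so it should work on any cluster):

```bash
kubectl get pods -o custom-columns=NAME:.metadata.name,QOS:.status.qosClass
```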
|
||||
---
|
||||
|
||||
@@ -400,7 +400,7 @@ These quotas will apply to the namespace where the ResourceQuota is created.
|
||||
|
||||
- Quotas can be created with a YAML definition
|
||||
|
||||
- ... Or with the `kubectl create quota` command
|
||||
- ...Or with the `kubectl create quota` command
|
||||
|
||||
- Example:
|
||||
```bash
|
||||
|
||||
@@ -90,4 +90,4 @@
|
||||
|
||||
- For a longer list, check the Kubernetes documentation:
|
||||
<br/>
|
||||
it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/pick-right-solution/) to set up Kubernetes.
|
||||
it has a great guide to [pick the right solution](https://kubernetes.io/docs/setup/#production-environment) to set up Kubernetes.
|
||||
|
||||
@@ -20,7 +20,7 @@ with a cloud provider
|
||||
|
||||
## EKS (the hard way)
|
||||
|
||||
- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
|
||||
- [Read the doc](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)
|
||||
|
||||
- Create service roles, VPCs, and a bunch of other oddities
|
||||
|
||||
@@ -69,6 +69,8 @@ with a cloud provider
|
||||
eksctl get clusters
|
||||
```
|
||||
|
||||
.footnote[Note: the AWS documentation has been updated and now includes [eksctl instructions](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html).]
|
||||
|
||||
---
|
||||
|
||||
## GKE (initial setup)
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
|
||||
## A possible approach
|
||||
|
||||
- Since each component of the control plane can be replicated ...
|
||||
- Since each component of the control plane can be replicated...
|
||||
|
||||
- We could set up the control plane outside of the cluster
|
||||
|
||||
@@ -39,9 +39,9 @@
|
||||
- Worst case scenario, we might need to:
|
||||
|
||||
- set up a new control plane (outside of the cluster)
|
||||
|
||||
|
||||
- restore a backup from the old control plane
|
||||
|
||||
|
||||
- move the new control plane to the cluster (again)
|
||||
|
||||
- This doesn't sound like a great experience
|
||||
@@ -57,7 +57,7 @@
|
||||
- The kubelet can also get a list of *static pods* from:
|
||||
|
||||
- a directory containing one (or multiple) *manifests*, and/or
|
||||
|
||||
|
||||
- a URL (serving a *manifest*)
|
||||
|
||||
- These "manifests" are basically YAML definitions
|
||||
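On kubeadm-deployed clusters, that directory is typically `/etc/kubernetes/manifests` (a sketch; other setups may use a different path or the URL mechanism):

```bash
# Standard kubeadm location for static pod manifests
ls /etc/kubernetes/manifests
# The kubelet is pointed at it with --pod-manifest-path=...
# (or staticPodPath: ... in its configuration file)
```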
@@ -100,11 +100,11 @@
|
||||
|
||||
## Static pods vs normal pods
|
||||
|
||||
- The API only gives us a read-only access to static pods
|
||||
- The API only gives us read-only access to static pods
|
||||
|
||||
- We can `kubectl delete` a static pod ...
|
||||
- We can `kubectl delete` a static pod...
|
||||
|
||||
... But the kubelet will re-mirror it immediately
|
||||
...But the kubelet will re-mirror it immediately
|
||||
|
||||
- Static pods can be selected just like other pods
|
||||
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Administrator
|
||||
Training
|
||||
|
||||
chat: "[Slack](https://wework.slack.com/messages/CK6PD2EUF/)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
#chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://wwrk-2019-06.container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/kubenet.md
|
||||
- - k8s/kubectlget.md
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/kubectlrun.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- - k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- - k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
- k8s/owners-and-dependents.md
|
||||
# DAY 2
|
||||
- - k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/ingress.md
|
||||
- - k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- - k8s/namespaces.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm.md
|
||||
#- k8s/create-chart.md
|
||||
- k8s/prometheus.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
# DAY 3
|
||||
- - k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/dmuc.md
|
||||
- - k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- - k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/apilb.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
#DAY 4
|
||||
- - k8s/authn-authz.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/openid-connect.md
|
||||
- - k8s/control-plane-auth.md
|
||||
- k8s/bootstrap.md
|
||||
- k8s/netpol.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- - k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
- - k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -2,34 +2,18 @@
|
||||
|
||||
- Hello! We are:
|
||||
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Tiny Shell Script LLC)
|
||||
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Tiny Shell Script LLC)
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Ardan Labs LLC)
|
||||
|
||||
- The training will run from 9am to 5pm
|
||||
|
||||
- There will be a lunch break
|
||||
|
||||
(And coffee breaks!)
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
(Let's make sure right now that we all are on that channel!)
|
||||
|
||||
---
|
||||
|
||||
## Logistics
|
||||
|
||||
Training schedule for the 4 days:
|
||||
|
||||
|                   |                    |
|
||||
|-------------------|--------------------|
|
||||
| 9:00am | Start of training
|
||||
| 10:30am → 11:00am | Break
|
||||
| 12:30pm → 1:30pm | Lunch
|
||||
| 3:00pm → 3:30pm | Break
|
||||
| 5:00pm | End of training
|
||||
|
||||
- Lunch will be catered
|
||||
|
||||
- During the breaks, the instructors will be available for Q&A
|
||||
|
||||
- Make sure to hydrate / caffeinate / stretch out your limbs :)
|
||||
@@ -1,15 +1,15 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for administrators
|
||||
and operators
|
||||
Advanced
|
||||
Training
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
chat: "[Gitter](https://gitter.im/jpetazzo/training-20190613-sanfrancisco)"
|
||||
#chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
slides: http://sfsf-2019-06.container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
@@ -27,42 +27,35 @@ chapters:
|
||||
- k8s/dmuc.md
|
||||
- - k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- - k8s/apilb.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/apilb.md
|
||||
- - k8s/kubercoins.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
# DAY 2
|
||||
- - k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/csr-api.md
|
||||
- - k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
###- k8s/bootstrap.md
|
||||
- k8s/netpol.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- - k8s/prometheus.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
###- k8s/operators-design.md
|
||||
# CONCLUSION
|
||||
- - k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
- |
|
||||
# (All content after this slide is bonus material)
|
||||
# EXTRA
|
||||
- - k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
# DAY 2
|
||||
- - k8s/namespaces.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm.md
|
||||
- k8s/create-chart.md
|
||||
- k8s/create-more-charts.md
|
||||
- - k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
- k8s/operators-design.md
|
||||
- k8s/owners-and-dependents.md
|
||||
- - k8s/authn-authz.md
|
||||
- k8s/control-plane-auth.md
|
||||
- k8s/prometheus.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
# CONCLUSION
|
||||
- - k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -80,7 +80,7 @@ and displays aggregated logs.
|
||||
|
||||
- DockerCoins is *not* a cryptocurrency
|
||||
|
||||
(the only common points are "randomness", "hashing", and "coins" in the name)
|
||||
(the only common points are "randomness," "hashing," and "coins" in the name)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -11,11 +11,5 @@ class: title, in-person
|
||||
@@TITLE@@<br/><br/>
|
||||
|
||||
.footnote[
|
||||
**Be kind to the WiFi!**<br/>
|
||||
<!-- *Use the 5G network.* -->
|
||||
*Don't use your hotspot.*<br/>
|
||||
*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*<br/>
|
||||
*Thank you!*
|
||||
|
||||
**Slides: @@SLIDES@@**
|
||||
]
|
||||
|
||||