Mirror of https://github.com/jpetazzo/container.training.git
(synced 2026-02-19 20:19:54 +00:00)

Comparing commits: qconsf2018...velocityeu (26 commits)

Commit SHA1s:
c596f54dfc, 99271a09d3, e51f110e9d, 58936f098f, d6f01d5302, d5d281b627,
0633f952d4, a93c618154, 4a25c66206, 8530dc750f, 0571b1f3a5, 24e7cab2ca,
09a364f554, c18d07b06f, 41cd6ad554, 565db253bf, c46baa0f74, cb94697a55,
74a30db7bd, 336cfbe4dc, 48a834e85c, 11ca023e45, e2f020b994, 062e8f124a,
9f1c3db527, 9a66a894ba
@@ -132,9 +132,6 @@ spec:
         resources: {}
         terminationMessagePath: /dev/termination-log
         terminationMessagePolicy: File
-        env:
-        - name: ES_JAVA_OPTS
-          value: "-Xms1g -Xmx1g"
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       schedulerName: default-scheduler

@@ -5,7 +5,7 @@ metadata:
 spec:
   podSelector:
     matchLabels:
-      app: testweb
+      run: testweb
   ingress:
   - from:
     - podSelector:

@@ -5,6 +5,6 @@ metadata:
 spec:
   podSelector:
     matchLabels:
-      app: testweb
+      run: testweb
   ingress: []

@@ -16,7 +16,7 @@ metadata:
 spec:
   podSelector:
     matchLabels:
-      app: webui
+      run: webui
   ingress:
   - from: []

@@ -6,7 +6,7 @@ metadata:
   creationTimestamp: null
   generation: 1
   labels:
-    app: socat
+    run: socat
   name: socat
   namespace: kube-system
   selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat

@@ -14,7 +14,7 @@ spec:
   replicas: 1
   selector:
     matchLabels:
-      app: socat
+      run: socat
   strategy:
     rollingUpdate:
       maxSurge: 1

@@ -24,7 +24,7 @@ spec:
     metadata:
       creationTimestamp: null
       labels:
-        app: socat
+        run: socat
     spec:
       containers:
       - args:

@@ -49,7 +49,7 @@ kind: Service
 metadata:
   creationTimestamp: null
   labels:
-    app: socat
+    run: socat
   name: socat
   namespace: kube-system
   selfLink: /api/v1/namespaces/kube-system/services/socat

@@ -60,7 +60,7 @@ spec:
     protocol: TCP
     targetPort: 80
   selector:
-    app: socat
+    run: socat
   sessionAffinity: None
   type: NodePort
 status:

@@ -123,9 +123,7 @@ _cmd_kube() {
     pssh --timeout 200 "
     if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
         kubeadm token generate > /tmp/token &&
-        sudo kubeadm init \
-            --token \$(cat /tmp/token) \
-            --ignore-preflight-errors=SystemVerification
+        sudo kubeadm init --token \$(cat /tmp/token)
     fi"

     # Put kubeconfig in ubuntu's and docker's accounts

@@ -149,10 +147,7 @@ _cmd_kube() {
     pssh --timeout 200 "
     if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
         TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token) &&
-        sudo kubeadm join \
-            --discovery-token-unsafe-skip-ca-verification \
-            --ignore-preflight-errors=SystemVerification \
-            --token \$TOKEN node1:6443
+        sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
     fi"

     # Install kubectx and kubens

@@ -175,8 +170,7 @@ EOF"
     # Install stern
     pssh "
     if [ ! -x /usr/local/bin/stern ]; then
-        ##VERSION##
-        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
+        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64 &&
         sudo chmod +x /usr/local/bin/stern &&
         stern --completion bash | sudo tee /etc/bash_completion.d/stern
     fi"

@@ -406,28 +400,6 @@ _cmd_test() {
     test_tag
 }

-_cmd helmprom "Install Helm and Prometheus"
-_cmd_helmprom() {
-    TAG=$1
-    need_tag
-    pssh "
-    if grep -q node1 /tmp/node; then
-        kubectl -n kube-system get serviceaccount helm ||
-        kubectl -n kube-system create serviceaccount helm
-        helm init --service-account helm
-        kubectl get clusterrolebinding helm-can-do-everything ||
-        kubectl create clusterrolebinding helm-can-do-everything \
-            --clusterrole=cluster-admin \
-            --serviceaccount=kube-system:helm
-        helm upgrade --install prometheus stable/prometheus \
-            --namespace kube-system \
-            --set server.service.type=NodePort \
-            --set server.service.nodePort=30090 \
-            --set server.persistentVolume.enabled=false \
-            --set alertmanager.enabled=false
-    fi"
-}

 # Sometimes, weave fails to come up on some nodes.
 # Symptom: the pods on a node are unreachable (they don't even ping).
 # Remedy: wipe out Weave state and delete weave pod on that node.

@@ -201,6 +201,5 @@ aws_tag_instances() {
 }

 aws_get_ami() {
-    ##VERSION##
-    find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
+    find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q
 }

@@ -1,25 +0,0 @@
# Number of VMs per cluster
clustersize: 4

# Jinja2 template to use to generate ready-to-cut cards
cards_template: jerome.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0

# Password used to connect with the "docker user"
docker_user_password: training

@@ -1,131 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://qconsf2018.container.training/" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
body, table {
  margin: 0;
  padding: 0;
  line-height: 1.0em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  height: 31%;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 30%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

div.back {
  border: 1px dotted white;
}

div.back p {
  margin: 0.5em 1em 0 1em;
}

p {
  margin: 0.4em 0 0.8em 0;
}

img {
  height: 5em;
  float: right;
  margin-right: 1em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
<div>

<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this card at the workshop "Getting Started With Kubernetes and Container Orchestration"
during QCON San Francisco (November 2018).</p>
<p>That workshop was a 1-day version of a longer curriculum.</p>
<p>If you liked that workshop, the instructor (Jérôme Petazzoni) can deliver it
(or the longer version) to your team or organization.</p>
<p>You can reach him at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p>Thank you!</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endfor %}
</body>
</html>

@@ -1 +1 @@
-/ /kube-fullday.yml.html 200!
+/ /kube-halfday.yml.html 200!

@@ -3,17 +3,8 @@
   country: us
   event: QCON
   title: Introduction to Docker and Containers
   speaker: zeroasterisk
   attend: https://qconsf.com/sf2018/workshop/introduction-docker-and-containers

 - date: 2018-11-08
   city: San Francisco, CA
   country: us
   event: QCON
   title: Getting Started With Kubernetes and Container Orchestration
   speaker: jpetazzo
-  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration-thursday-section
-  slides: http://qconsf2018.container.training/
+  attend: https://qconsf.com/sf2018/workshop/introduction-docker-and-containers

 - date: 2018-11-09
   city: San Francisco, CA

@@ -21,8 +12,7 @@
   event: QCON
   title: Getting Started With Kubernetes and Container Orchestration
   speaker: jpetazzo
-  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration-friday-section
-  slides: http://qconsf2018.container.training/
+  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration

 - date: 2018-10-31
   city: London, UK

@@ -30,7 +20,6 @@
   event: Velocity EU
   title: Kubernetes 101
   speaker: bridgetkromhout
   slides: https://velocityeu2018.container.training
   attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71149

 - date: 2018-10-30
@@ -538,7 +538,7 @@ It's important to note a couple of details in these flags ...

 - But that we can't create things:
   ```
-  ./kubectl create deployment --image=nginx
+  ./kubectl run tryme --image=nginx
   ```

 - Exit the container with `exit` or `^D`
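A quick, non-destructive way to double-check such permissions is `kubectl auth can-i`; a minimal sketch, reusing the restricted `./kubectl` binary from the snippet above:

```bash
# Ask the API server whether the current credentials allow an operation
# (these queries are read-only; nothing is created or deleted):
./kubectl auth can-i list pods
./kubectl auth can-i create deployments
```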
@@ -171,11 +171,7 @@ class: pic

 ---

-## Do we need to run Docker at all?

-No!

 --

+## Default container runtime

 - By default, Kubernetes uses the Docker Engine to run containers

@@ -185,42 +181,6 @@ No!

   (like CRI-O, or containerd)

 ---

-## Do we need to run Docker at all?

-Yes!

 --

-- In this workshop, we run our app on a single node first

-- We will need to build images and ship them around

-- We can do these things without Docker
-  <br/>
-  (and get diagnosed with NIH¹ syndrome)

-- Docker is still the most stable container engine today
-  <br/>
-  (but other options are maturing very quickly)

-.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]

 ---

-## Do we need to run Docker at all?

-- On our development environments, CI pipelines ... :

-  *Yes, almost certainly*

-- On our production servers:

-  *Yes (today)*

-  *Probably not (in the future)*

-.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]

 ---

@@ -235,11 +195,12 @@ Yes!

 - node (a machine — physical or virtual — in our cluster)
 - pod (group of containers running together on a node)
 - IP addresses are associated with *pods*, not with individual containers
 - service (stable network endpoint to connect to one or multiple containers)
 - namespace (more-or-less isolated group of things)
 - secret (bundle of sensitive data to be passed to a container)

-And much more!
+- And much more!

+- We can see the full list by running `kubectl api-resources`
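For reference, `kubectl api-resources` can also filter and reformat its output; a small sketch (the flags assume kubectl 1.11 or later):

```bash
# Full list of resource types known to this cluster:
kubectl api-resources
# Only the namespaced resources, as bare names:
kubectl api-resources --namespaced=true -o name
```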
@@ -250,25 +211,3 @@ Yes!

class: pic



---

class: pic



---

## Credits

- The first diagram is courtesy of Weave Works

  - a *pod* can have multiple containers working together

  - IP addresses are associated with *pods*, not with individual containers

- The second diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)

  - it's one of the best Kubernetes architecture diagrams available!

Both diagrams used with permission.
@@ -256,19 +256,19 @@ The master node has [taints](https://kubernetes.io/docs/concepts/configuration/t

 - Let's check the logs of all these `rng` pods

-- All these pods have the label `app=rng`:
+- All these pods have a `run=rng` label:

-  - the first pod, because that's what `kubectl create deployment` does
+  - the first pod, because that's what `kubectl run` does
   - the other ones (in the daemon set), because we
     *copied the spec from the first one*

-- Therefore, we can query everybody's logs using that `app=rng` selector
+- Therefore, we can query everybody's logs using that `run=rng` selector

 .exercise[

-- Check the logs of all the pods having a label `app=rng`:
+- Check the logs of all the pods having a label `run=rng`:
   ```bash
-  kubectl logs -l app=rng --tail 1
+  kubectl get pods -l run=rng -o name | xargs -n 1 kubectl logs --tail 1
   ```

 ]
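To double-check which label key (`app=` or `run=`) the pods actually carry on a given cluster, a quick sketch:

```bash
# Show each pod with all of its labels:
kubectl get pods --show-labels
# Or surface the two candidate label keys as dedicated columns:
kubectl get pods -L app -L run
```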
@@ -279,51 +279,11 @@ It appears that *all the pods* are serving requests at the moment.

 ---

-## Working around `kubectl logs` bugs

-- That last command didn't show what we needed

-- We mentioned earlier that regression affecting `kubectl logs` ...

-  (see [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for more details)

-- Let's work around the issue by executing `kubectl logs` one pod at a time

-- For convenience, we'll define a little shell function

 ---

-## Our helper function

-- The function `ktail` below will:

-  - list the names of all pods matching a selector
-  - display the last line of log for each pod

-.exercise[

-- Define `ktail`:
-  ```bash
-  ktail () {
-          kubectl get pods -o name -l $1 |
-          xargs -rn1 kubectl logs --tail 1
-  }
-  ```

-- Try it:
-  ```bash
-  ktail app=rng
-  ```

-]

 ---

 ## The magic of selectors

 - The `rng` *service* is load balancing requests to a set of pods

-- This set of pods is defined as "pods having the label `app=rng`"
+- This set of pods is defined as "pods having the label `run=rng`"

 .exercise[
@@ -350,7 +310,7 @@ to the associated load balancer.

 --

-- What would happen if we removed the `app=rng` label from that pod?
+- What would happen if we removed the `run=rng` label from that pod?

 --

@@ -362,7 +322,7 @@ to the associated load balancer.

 --

-- But but but ... Don't we have more than one pod with `app=rng` now?
+- But but but ... Don't we have more than one pod with `run=rng` now?

 --

@@ -385,7 +345,7 @@ to the associated load balancer.
   <br/>(The second command doesn't require you to get the exact name of the replica set)
   ```bash
   kubectl describe rs rng-yyyyyyyy
-  kubectl describe rs -l app=rng
+  kubectl describe rs -l run=rng
   ```

 ]
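To see which pods a service is actually load balancing to, the service's endpoints can be inspected directly; a short sketch using the `rng` service discussed here:

```bash
# The endpoints object lists the pod IPs currently matched by the selector:
kubectl get endpoints rng
# "describe" shows the selector and the resulting endpoints together:
kubectl describe service rng
```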
@@ -473,11 +433,11 @@ Of course, option 2 offers more learning opportunities. Right?

<!--
```wait Please edit the object below```
```keys /app: rng```
```keys /run: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys /app: rng```
```keys /run: rng```
```keys ^J```
```keys oisactive: "yes"```
```keys ^[``` ]

@@ -492,7 +452,7 @@ Of course, option 2 offers more learning opportunities. Right?

 <!--
 ```wait Please edit the object below```
-```keys /app: rng```
+```keys /run: rng```
 ```keys ^J```
 ```keys noisactive: "yes"```
 ```keys ^[``` ]

@@ -508,9 +468,9 @@ Of course, option 2 offers more learning opportunities. Right?

 .exercise[

-- Check the most recent log line of all `app=rng` pods to confirm that exactly one per node is now active:
+- Check the most recent log line of all `run=rng` pods to confirm that exactly one per node is now active:
   ```bash
-  kubectl logs -l app=rng --tail 1
+  kubectl logs -l run=rng --tail 1
   ```

 ]

@@ -536,14 +496,14 @@ The timestamps should give us a hint about how many pods are currently receiving

 .exercise[

-- List the pods with `app=rng` but without `isactive=yes`:
+- List the pods with `run=rng` but without `isactive=yes`:
   ```bash
-  kubectl get pods -l app=rng,isactive!=yes
+  kubectl get pods -l run=rng,isactive!=yes
   ```

 - Remove these pods:
   ```bash
-  kubectl delete pods -l app=rng,isactive!=yes
+  kubectl delete pods -l run=rng,isactive!=yes
   ```

 ]

@@ -621,7 +581,7 @@ Ding, dong, the deployment is dead! And the daemon set lives on.
   labels:
     isactive: "yes"
 '
-kubectl get pods -l app=rng -l controller-revision-hash -o name |
+kubectl get pods -l run=rng -l controller-revision-hash -o name |
   xargs kubectl patch -p "$PATCH"
 ```
@@ -392,9 +392,9 @@ This is normal: we haven't provided any ingress rule yet.

 - Run all three deployments:
   ```bash
-  kubectl create deployment cheddar --image=errm/cheese:cheddar
-  kubectl create deployment stilton --image=errm/cheese:stilton
-  kubectl create deployment wensleydale --image=errm/cheese:wensleydale
+  kubectl run cheddar --image=errm/cheese:cheddar
+  kubectl run stilton --image=errm/cheese:stilton
+  kubectl run wensleydale --image=errm/cheese:wensleydale
   ```

 - Create a service for each of them:
@@ -57,49 +57,31 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables`

- Since `ping` doesn't have anything to connect to, we'll have to run something else

- We could use the `nginx` official image, but ...

  ... we wouldn't be able to tell the backends from each other!

- We are going to use `jpetazzo/httpenv`, a tiny HTTP server written in Go

- `jpetazzo/httpenv` listens on port 8888

- It serves its environment variables in JSON format

- The environment variables will include `HOSTNAME`, which will be the pod name

  (and therefore, will be different on each backend)
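The image is easy to sanity-check outside the cluster, too; a minimal sketch, assuming a Docker Engine is available on the node:

```bash
# Run httpenv locally, hit it once, then clean up:
docker run -d --name httpenv-test -p 8888:8888 jpetazzo/httpenv
curl localhost:8888
docker rm -f httpenv-test
```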
---

## Creating a deployment for our HTTP server

- We *could* do `kubectl run httpenv --image=jpetazzo/httpenv` ...

- But since `kubectl run` is being deprecated, let's see how to use `kubectl create` instead

.exercise[

- In another window, watch the pods (to see when they will be created):
- Start a bunch of HTTP servers:
  ```bash
  kubectl run httpenv --image=jpetazzo/httpenv --replicas=10
  ```

- Watch them being started:
  ```bash
  kubectl get pods -w
  ```

<!-- ```keys ^C``` -->

- Create a deployment for this very lightweight HTTP server:
  ```bash
  kubectl create deployment httpenv --image=jpetazzo/httpenv
  ```

- Scale it to 10 replicas:
  ```bash
  kubectl scale deployment httpenv --replicas=10
  ```
<!--
```wait httpenv-```
```keys ^C```
-->

]

The `jpetazzo/httpenv` image runs an HTTP server on port 8888.
<br/>
It serves its environment variables in JSON format.

The `-w` option "watches" events happening on the specified resources.

---

## Exposing our deployment

@@ -110,12 +92,12 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables`

 - Expose the HTTP port of our server:
   ```bash
-  kubectl expose deployment httpenv --port 8888
+  kubectl expose deploy/httpenv --port 8888
   ```

 - Look up which IP address was allocated:
   ```bash
-  kubectl get service
+  kubectl get svc
   ```

 ]
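Once the service exists, it can be tested from any node in the cluster; a minimal sketch (the `jsonpath` expression is one way among several to grab the ClusterIP):

```bash
# Capture the ClusterIP allocated to the service, then send a request:
IP=$(kubectl get svc httpenv -o jsonpath={.spec.clusterIP})
curl http://$IP:8888
```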
@@ -255,7 +237,7 @@ class: extra-details

 - These IP addresses should match the addresses of the corresponding pods:
   ```bash
-  kubectl get pods -l app=httpenv -o wide
+  kubectl get pods -l run=httpenv -o wide
   ```

 ---

@@ -173,11 +173,6 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
   kubectl scale deploy/pingpong --replicas 8
   ```

-- Note that this command does exactly the same thing:
-  ```bash
-  kubectl scale deployment pingpong --replicas 8
-  ```

 ]

 Note: what if we tried to scale `replicaset.apps/pingpong-xxxxxxxxxx`?

@@ -295,20 +290,6 @@ Unfortunately, `--follow` cannot (yet) be used to stream the logs from multiple

 ---

-## `kubectl logs -l ... --tail N`

-- With Kubernetes 1.12 (and up to at least 1.12.2), the last command shows multiple lines

-- This is a regression when `--tail` is used together with `-l`/`--selector`

-- It always shows the last 10 lines of output for each container

-  (instead of the number of lines specified on the command line)

-- See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details
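Until the fix lands, the issue can be sidestepped by invoking `kubectl logs` once per pod; a sketch (the label below is illustrative, adjust it to whatever your pods carry):

```bash
# List matching pod names, then fetch the last log line of each one
# individually (xargs -r skips the call when no pod matches):
kubectl get pods -l app=pingpong -o name |
  xargs -rn1 kubectl logs --tail 1
```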
---

## Aren't we flooding 1.1.1.1?

- If you're wondering this, good question!

@@ -14,11 +14,11 @@

 - Download the `kubectl` binary from one of these links:

-  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/linux/amd64/kubectl)
+  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl)
   |
-  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/darwin/amd64/kubectl)
+  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/darwin/amd64/kubectl)
   |
-  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.12.2/bin/windows/amd64/kubectl.exe)
+  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/windows/amd64/kubectl.exe)

 - On Linux and macOS, make the binary executable with `chmod +x kubectl`

@@ -62,12 +62,10 @@ Exactly what we need!
 - The following commands will install Stern on a Linux Intel 64 bit machine:
   ```bash
   sudo curl -L -o /usr/local/bin/stern \
-       https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64
+       https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
   sudo chmod +x /usr/local/bin/stern
   ```

-<!-- ##VERSION## -->

 ---

 ## Using Stern

@@ -132,13 +130,11 @@ Exactly what we need!

 - We can use that property to view the logs of all the pods created with `kubectl run`

-- Similarly, everything created with `kubectl create deployment` has a label `app`

 .exercise[

-- View the logs for all the things started with `kubectl create deployment`:
+- View the logs for all the things started with `kubectl run`:
   ```bash
-  stern -l app
+  stern -l run
   ```

 <!--

@@ -214,10 +214,6 @@ Note: it might take a minute or two for the app to be up and running.
   kubens -
   ```

-- On our clusters, `kubens` is called `kns` instead

-  (so that it's even fewer keystrokes to switch namespaces)

 ---

 ## `kubens` and `kubectx`

@@ -117,13 +117,13 @@ This is our game plan:

 - Let's use the `nginx` image:
   ```bash
-  kubectl create deployment testweb --image=nginx
+  kubectl run testweb --image=nginx
   ```

 - Find out the IP address of the pod with one of these two commands:
   ```bash
-  kubectl get pods -o wide -l app=testweb
-  IP=$(kubectl get pods -l app=testweb -o json | jq -r .items[0].status.podIP)
+  kubectl get pods -o wide -l run=testweb
+  IP=$(kubectl get pods -l run=testweb -o json | jq -r .items[0].status.podIP)
   ```

 - Check that we can connect to the server:

@@ -138,7 +138,7 @@ The `curl` command should show us the "Welcome to nginx!" page.

 ## Adding a very restrictive network policy

-- The policy will select pods with the label `app=testweb`
+- The policy will select pods with the label `run=testweb`

 - It will specify an empty list of ingress rules (matching nothing)

@@ -172,7 +172,7 @@ metadata:
 spec:
   podSelector:
     matchLabels:
-      app: testweb
+      run: testweb
   ingress: []
 ```
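One way to verify the policy is to attempt a connection from a throwaway client pod; a hedged sketch (`$IP` is the pod IP captured earlier; `-T 2` gives busybox `wget` a 2-second timeout):

```bash
# Run a one-off client pod; once the deny-all policy is in place,
# this request should time out instead of returning the nginx page:
kubectl run testclient --rm -it --restart=Never --image=alpine -- \
  wget -T 2 -qO- http://$IP
```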
@@ -207,7 +207,7 @@ metadata:
 spec:
   podSelector:
     matchLabels:
-      app: testweb
+      run: testweb
   ingress:
   - from:
     - podSelector:

@@ -325,7 +325,7 @@ spec:

 ## Allowing traffic to `webui` pods

-This policy selects all pods with label `app=webui`.
+This policy selects all pods with label `run=webui`.

 It allows traffic from any source.

@@ -339,7 +339,7 @@ metadata:
 spec:
   podSelector:
     matchLabels:
-      app: webui
+      run: webui
   ingress:
   - from: []
 ```

@@ -4,22 +4,6 @@ Our app on Kube

 ---

-## What's on the menu?

-In this part, we will:

-- **build** images for our app,

-- **ship** these images with a registry,

-- **run** deployments using these images,

-- expose these deployments so they can communicate with each other,

-- expose the web UI so we can access it from outside.

 ---

 ## The plan

 - Build on our control node (`node1`)

@@ -74,7 +58,7 @@ In this part, we will:

 - Create the registry service:
   ```bash
-  kubectl create deployment registry --image=registry
+  kubectl run registry --image=registry
   ```

 - Expose it on a NodePort:

@@ -131,47 +115,6 @@ We should see:

 ---

-## Testing our local registry

-- We can retag a small image, and push it to the registry

-.exercise[

-- Make sure we have the busybox image, and retag it:
-  ```bash
-  docker pull busybox
-  docker tag busybox $REGISTRY/busybox
-  ```

-- Push it:
-  ```bash
-  docker push $REGISTRY/busybox
-  ```

-]

 ---

-## Checking again what's on our local registry

-- Let's use the same endpoint as before

-.exercise[

-- Ensure that our busybox image is now in the local registry:
-  ```bash
-  curl $REGISTRY/v2/_catalog
-  ```

-]

-The curl command should now output:
-```json
-{"repositories":["busybox"]}
-```

 ---

 ## Building and pushing our images

 - We are going to use a convenient feature of Docker Compose
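The feature referred to here is presumably the `build`/`push` pair, which works when each service in the Compose file carries both a `build` and an `image` key; a sketch (the registry address and tag are placeholders, use the values from your own setup):

```bash
# Build every service image, then push them all to $REGISTRY:
export REGISTRY=localhost:32000   # example registry address; use your own
export TAG=v0.1
docker-compose build
docker-compose push
```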
@@ -246,27 +189,6 @@ class: extra-details

 ---

-## Catching up

-- If you have problems deploying the registry ...

-- Or building or pushing the images ...

-- Don't worry: we provide pre-built images hosted on the Docker Hub!

-- The images are named `dockercoins/worker:v0.1`, `dockercoins/rng:v0.1`, etc.

-- To use them, just set the `REGISTRY` environment variable to `dockercoins`:
-  ```bash
-  export REGISTRY=dockercoins
-  ```

-- Make sure to set the `TAG` to `v0.1`

-  (our repositories on the Docker Hub do not provide a `latest` tag)

 ---

 ## Deploying all the things

 - We can now deploy our code (as well as a redis instance)

@@ -275,13 +197,13 @@ class: extra-details

 - Deploy `redis`:
   ```bash
-  kubectl create deployment redis --image=redis
+  kubectl run redis --image=redis
   ```

 - Deploy everything else:
   ```bash
   for SERVICE in hasher rng webui worker; do
-    kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
+    kubectl run $SERVICE --image=$REGISTRY/$SERVICE:$TAG
   done
   ```

@@ -22,19 +22,14 @@

 .exercise[

-- Let's create a deployment running `nginx`:
+- Let's start a replicated `nginx` deployment:
   ```bash
-  kubectl create deployment yanginx --image=nginx
-  ```

-- Scale it to a few replicas:
-  ```bash
-  kubectl scale deployment yanginx --replicas=3
+  kubectl run yanginx --image=nginx --replicas=3
   ```

 - Once it's up, check the corresponding pods:
   ```bash
-  kubectl get pods -l app=yanginx -o yaml | head -n 25
+  kubectl get pods -l run=yanginx -o yaml | head -n 25
   ```

 ]

@@ -104,12 +99,12 @@ so the lines should not be indented (otherwise the indentation will insert space

 - Delete the Deployment:
   ```bash
-  kubectl delete deployment -l app=yanginx --cascade=false
+  kubectl delete deployment -l run=yanginx --cascade=false
   ```

 - Delete the Replica Set:
   ```bash
-  kubectl delete replicaset -l app=yanginx --cascade=false
+  kubectl delete replicaset -l run=yanginx --cascade=false
   ```

 - Check that the pods are still here:

@@ -131,7 +126,7 @@ class: extra-details

 - If we change the labels on a dependent, so that it's not selected anymore

-  (e.g. change the `app: yanginx` in the pods of the previous example)
+  (e.g. change the `run: yanginx` in the pods of the previous example)

 - If a deployment tool that we're using does these things for us

@@ -179,4 +174,4 @@ class: extra-details

 ]

-As always, the [documentation](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) has useful extra information and pointers.
+As always, the [documentation](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) has useful extra information and pointers.
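Ownership is recorded in each object's metadata, so orphaning is easy to observe; a sketch using `jq` (already relied on elsewhere in this workshop), with an illustrative label:

```bash
# Before deletion with --cascade=false, each pod points at its replica set;
# afterwards, ownerReferences is simply absent (null):
kubectl get pods -l app=yanginx -o json |
  jq '.items[].metadata.ownerReferences'
```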
@@ -151,7 +151,7 @@ scrape_configs:

 ## Running Prometheus on our cluster

-We would need to:
+We need to:

 - Run the Prometheus server in a pod

@@ -171,21 +171,19 @@ We would need to:

 ## Helm Charts to the rescue

-- To make our lives easier, we could use a Helm Chart
+- To make our lives easier, we are going to use a Helm Chart

-- The Helm Chart would take care of all the steps explained above
+- The Helm Chart will take care of all the steps explained above

   (including some extra features that we don't need, but won't hurt)

-- In fact, Prometheus has been pre-installed on our clusters with Helm

-  (it was pre-installed so that it would be populated with metrics by now)

 ---

-## Step 1: if we had to install Helm
+## Step 1: install Helm

-- Note that if Helm is already installed, these commands won't break anything
+- If we already installed Helm earlier, these commands won't break anything

 .exercice[

 - Install Tiller (Helm's server-side component) on our cluster:
   ```bash

@@ -198,17 +196,27 @@ We would need to:
       --clusterrole=cluster-admin --serviceaccount=kube-system:default
   ```

 ]

 ---

-## Step 2: if we had to install Prometheus
+## Step 2: install Prometheus

-- This is how we would use Helm to deploy Prometheus on the cluster:
+- Skip this if we already installed Prometheus earlier

+  (in doubt, check with `helm list`)

 .exercice[

 - Install Prometheus on our cluster:
   ```bash
   helm install stable/prometheus \
        --set server.service.type=NodePort \
        --set server.persistentVolume.enabled=false
   ```

 ]

 The provided flags:

 - expose the server web UI (and API) on a NodePort

@@ -227,13 +235,11 @@ The provided flags:

 - Figure out the NodePort that was allocated to the Prometheus server:
   ```bash
-  kubectl get svc -n kube-system | grep prometheus-server
+  kubectl get svc | grep prometheus-server
   ```

 - With your browser, connect to that port

-  (spoiler alert: it should be 30090)

 ]

 ---

@@ -4,9 +4,7 @@

 --

-<!-- ##VERSION## -->

-- We used `kubeadm` on freshly installed VM instances running Ubuntu 18.04 LTS
+- We used `kubeadm` on freshly installed VM instances running Ubuntu 16.04 LTS

 1. Install Docker

@@ -38,10 +36,6 @@

   (At least ... not yet! Though it's [experimental in 1.12](https://kubernetes.io/docs/setup/independent/high-availability/).)

 --

-- "It's still twice as many steps as setting up a Swarm cluster 😕" -- Jérôme

 ---

 ## Other deployment options

@@ -1,10 +1,9 @@
 ## Versions installed

-- Kubernetes 1.12.2
-- Docker Engine 18.09.0
+- Kubernetes 1.12.1
+- Docker Engine 18.06.1-ce
 - Docker Compose 1.21.1

 <!-- ##VERSION## -->

 .exercise[

@@ -1,26 +1,3 @@
 # Next steps

-*Alright, how do I get started and containerize my apps?*

 --

-Suggested containerization checklist:

-.checklist[
-- write a Dockerfile for one service in one app
-- write Dockerfiles for the other (buildable) services
-- write a Compose file for that whole app
-- make sure that devs are empowered to run the app in containers
-- set up automated builds of container images from the code repo
-- set up a CI pipeline using these container images
-- set up a CD pipeline (for staging/QA) using these images
-]

-And *then* it is time to look at orchestration!

 ---

 ## Options for our first production cluster

 - Get a managed cluster from a major cloud provider (AKS, EKS, GKE...)

@@ -57,38 +34,6 @@ And *then* it is time to look at orchestration!

---

## Namespaces

- Namespaces let you run multiple identical stacks side by side

- Two namespaces (e.g. `blue` and `green`) can each have their own `redis` service

- Each of the two `redis` services has its own `ClusterIP`

- CoreDNS creates two entries, mapping to these two `ClusterIP` addresses:

  `redis.blue.svc.cluster.local` and `redis.green.svc.cluster.local`

- Pods in the `blue` namespace get a *search suffix* of `blue.svc.cluster.local`

- As a result, resolving `redis` from a pod in the `blue` namespace yields the "local" `redis`

.warning[This does not provide *isolation*! That would be the job of network policies.]
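A minimal sketch of the `blue`/`green` example above (the namespace names match the slide; the `redis` deployments are illustrative):

```bash
# Two namespaces, each with its own redis deployment and service:
kubectl create namespace blue
kubectl create namespace green
kubectl -n blue create deployment redis --image=redis
kubectl -n green create deployment redis --image=redis
kubectl -n blue expose deployment redis --port 6379
kubectl -n green expose deployment redis --port 6379
# Each service resolves as redis.<namespace>.svc.cluster.local;
# a pod in "blue" resolving plain "redis" gets the blue one.
```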
---

## Relevant sections

- [Namespaces](kube-selfpaced.yml.html#toc-namespaces)

- [Network Policies](kube-selfpaced.yml.html#toc-network-policies)

- [Role-Based Access Control](kube-selfpaced.yml.html#toc-authentication-and-authorization)

  (covers permissions model, user and service accounts management ...)

---

## Stateful services (databases etc.)

- As a first step, it is wiser to keep stateful services *outside* of the cluster

@@ -125,13 +70,6 @@ And *then* it is time to look at orchestration!

  - what do we gain by deploying this stateful service on Kubernetes?

- Relevant sections:
  [Volumes](kube-selfpaced.yml.html#toc-volumes)
  |
  [Stateful Sets](kube-selfpaced.yml.html#toc-stateful-sets)
  |
  [Persistent Volumes](kube-selfpaced.yml.html#toc-highly-available-persistent-volumes)

---

## HTTP traffic handling

@@ -149,7 +87,7 @@ And *then* it is time to look at orchestration!
   - URI mapping
   - and much more!

-- [This section](kube-selfpaced.yml.html#toc-exposing-http-services-with-ingress-resources) shows how to expose multiple HTTP apps using [Træfik](https://docs.traefik.io/user-guide/kubernetes/)
+- Check out e.g. [Træfik](https://docs.traefik.io/user-guide/kubernetes/)

 ---

@@ -165,8 +103,6 @@ And *then* it is time to look at orchestration!

   (e.g. with an agent bind-mounting the log directory)

-- [This section](kube-selfpaced.yml.html#toc-centralized-logging) shows how to do that with [Fluentd](https://docs.fluentd.org/v0.12/articles/kubernetes-fluentd) and the EFK stack

 ---

 ## Metrics

@@ -201,8 +137,6 @@ And *then* it is time to look at orchestration!

   (It's the container equivalent of the password on a post-it note on your screen)

-- [This section](kube-selfpaced.yml.html#toc-managing-configuration) shows how to manage app config with config maps (among others)

 ---

 ## Managing stack deployments

@@ -259,16 +193,3 @@ Sorry Star Trek fans, this is not the federation you're looking for!

 - Discover resources across clusters

 ---

-## Developer experience

-*We've put this last, but it's pretty important!*

-- How do you on-board a new developer?

-- What do they need to install to get a dev stack?

-- How does a code change make it from dev to prod?

-- How does someone add a component to a stack?

@@ -1,15 +1,14 @@
 title: |
-  Getting Started With
-  Kubernetes and
-  Container Orchestration
+  Deploying and Scaling Microservices
+  with Kubernetes

 #chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
 #chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
-chat: "Gitter ([Thursday](https://gitter.im/jpetazzo/workshop-20181108-sanfrancisco)|[Friday](https://gitter.im/jpetazzo/workshop-20181109-sanfrancisco))"
+chat: "In person!"

 gitrepo: github.com/jpetazzo/container.training

-slides: http://qconsf2018.container.training/
+slides: http://container.training/

 exclude:
 - self-paced

@@ -34,30 +33,30 @@ chapters:
   - k8s/kubectlrun.md
   - k8s/kubectlexpose.md
 - - k8s/ourapponkube.md
-# - k8s/kubectlproxy.md
-# - k8s/localkubeconfig.md
-# - k8s/accessinternal.md
+  - k8s/kubectlproxy.md
+  - k8s/localkubeconfig.md
+  - k8s/accessinternal.md
   - k8s/dashboard.md
   - k8s/kubectlscale.md
-  - k8s/daemonset.md
-- - k8s/rollout.md
-# - k8s/healthchecks.md
+- - k8s/daemonset.md
+  - k8s/rollout.md
+  - k8s/healthchecks.md
   - k8s/logs-cli.md
   - k8s/logs-centralized.md
-#- - k8s/helm.md
-# - k8s/namespaces.md
-# - k8s/netpol.md
-# - k8s/authn-authz.md
-#- - k8s/ingress.md
-# - k8s/gitworkflows.md
+- - k8s/helm.md
+  - k8s/namespaces.md
+  - k8s/netpol.md
+  - k8s/authn-authz.md
+- - k8s/ingress.md
+  - k8s/gitworkflows.md
   - k8s/prometheus.md
-#- - k8s/volumes.md
-# - k8s/build-with-docker.md
-# - k8s/build-with-kaniko.md
-# - k8s/configuration.md
-#- - k8s/owners-and-dependents.md
-# - k8s/statefulsets.md
-# - k8s/portworx.md
+- - k8s/volumes.md
+  - k8s/build-with-docker.md
+  - k8s/build-with-kaniko.md
+  - k8s/configuration.md
+- - k8s/owners-and-dependents.md
+  - k8s/statefulsets.md
+  - k8s/portworx.md
 - - k8s/whatsnext.md
   - k8s/links.md
   - shared/thankyou.md

@@ -1,7 +1,6 @@
 title: |
   Kubernetes 101

 #chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
 #chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
 chat: "In person!"

@@ -36,19 +35,18 @@ chapters:
 - - k8s/kubectlrun.md
   - k8s/kubectlexpose.md
   - k8s/ourapponkube.md
-#- k8s/kubectlproxy.md
-#- k8s/localkubeconfig.md
-#- k8s/accessinternal.md
+  - k8s/kubectlproxy.md
+# - k8s/localkubeconfig.md
+# - k8s/accessinternal.md
 - - k8s/dashboard.md
   - k8s/kubectlscale.md
   - k8s/daemonset.md
   - k8s/rollout.md
 - - k8s/logs-cli.md
 # Bridget hasn't added EFK yet
-#- k8s/logs-centralized.md
+# - k8s/logs-centralized.md
   - k8s/helm.md
   - k8s/namespaces.md
-#- k8s/netpol.md
+  - k8s/netpol.md
   - k8s/whatsnext.md
 # - k8s/links.md
 # Bridget-specific

@@ -5,7 +5,6 @@ title: |
 chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
 #chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

 gitrepo: github.com/jpetazzo/container.training

 slides: http://container.training/

@@ -1,62 +0,0 @@
title: |
  Deploying and Scaling Microservices
  with Kubernetes

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
- - k8s/kubenet.md
  - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/kubectlexpose.md
- - k8s/ourapponkube.md
  - k8s/kubectlproxy.md
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
  - k8s/dashboard.md
  - k8s/kubectlscale.md
- - k8s/daemonset.md
  - k8s/rollout.md
  - k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
- - k8s/helm.md
  - k8s/namespaces.md
  - k8s/netpol.md
  - k8s/authn-authz.md
- - k8s/ingress.md
  - k8s/gitworkflows.md
  - k8s/prometheus.md
- - k8s/volumes.md
  - k8s/build-with-docker.md
  - k8s/build-with-kaniko.md
  - k8s/configuration.md
- - k8s/owners-and-dependents.md
  - k8s/statefulsets.md
  - k8s/portworx.md
- - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md

@@ -2,15 +2,26 @@

 - Hello! We are:

-  - .emoji[✨] Bridget ([@bridgetkromhout](https://twitter.com/bridgetkromhout))
+  - .emoji[✨] Bridget Kromhout ([@bridgetkromhout](https://twitter.com/bridgetkromhout))

-  - .emoji[🌟] Joe ([@joelaha](https://twitter.com/joelaha))
+  - .emoji[🌟] Joe Laha ([@joelaha](https://twitter.com/joelaha))

-- The workshop will run from 13:30-16:45
+- The workshop will run from 9:30 - 13:00

-- There will be a break from 15:00-15:15
+- There will be a break from 11:00 - 11:30

 - Feel free to interrupt for questions at any time

 - *Especially when you see full screen container pictures!*

+---

+## Say hi!

+- We encourage networking at [#velocityconf](https://twitter.com/hashtag/velocityconf?f=tweets&vertical=default&src=hash)

+- Take a minute to introduce yourself to your neighbors

+- Tell them where you're from (where you're based out of & what org you work at)

+- Share what you're hoping to learn in this session! .emoji[✨]

@@ -1,11 +1,26 @@
 ## Intros

-- Hello! I'm
-  Jérôme Petazzoni ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
+- This slide should be customized by the tutorial instructor(s).

-- The workshop will run from 9am to 4pm
+- Hello! We are:

-- There will be a lunch break from noon to 1pm
+  - .emoji[👩🏻‍🏫] Ann O'Nymous ([@...](https://twitter.com/...), Megacorp Inc)

+  - .emoji[👨🏾‍🎓] Stu Dent ([@...](https://twitter.com/...), University of Wakanda)

+<!-- .dummy[

+  - .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)

+  - .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)

+  - .emoji[⛵] Jérémy ([@jeremygarrouste](twitter.com/jeremygarrouste), Inpiwee)

+] -->

+- The workshop will run from ...

+- There will be a lunch break at ...

+  (And coffee breaks!)

@@ -1,17 +0,0 @@
.remark-slide-content:not(.pic) {
  background-repeat: no-repeat;
  background-position: 99% 1%;
  background-size: 8%;
  background-image: url(https://enix.io/static/img/logos/logo-domain-cropped.png);
}

div.extra-details:not(.pic) {
  background-image: url("images/extra-details.png"), url(https://enix.io/static/img/logos/logo-domain-cropped.png);
  background-position: 0.5% 1%, 99% 1%;
  background-size: 4%, 8%;
}

.remark-slide-content:not(.pic) div.remark-slide-number {
  top: 16px;
  right: 112px
}

@@ -1,46 +1,4 @@
# Pre-requirements

- Be comfortable with the UNIX command line

  - navigating directories

  - editing files

  - a little bit of bash-fu (environment variables, loops)

- Some Docker knowledge

  - `docker run`, `docker ps`, `docker build`

  - ideally, you know how to write a Dockerfile and build it
    <br/>
    (even if it's a `FROM` line and a couple of `RUN` commands)

  - It's totally OK if you are not a Docker expert!

---

class: title

*Tell me and I forget.*
<br/>
*Teach me and I remember.*
<br/>
*Involve me and I learn.*

Misattributed to Benjamin Franklin

[(Probably inspired by Chinese Confucian philosopher Xunzi)](https://www.barrypopik.com/index.php/new_york_city/entry/tell_me_and_i_forget_teach_me_and_i_may_remember_involve_me_and_i_will_lear/)

---

## Hands-on sections

- The whole workshop is hands-on

- We are going to build, ship, and run containers!

- You are invited to reproduce all the demos
## Hands-on

- All hands-on sections are clearly identified, like the gray rectangle below

@@ -50,53 +8,10 @@ Misattributed to Benjamin Franklin

- Go to @@SLIDES@@ to view these slides

- Join the chat room: @@CHAT@@

<!-- ```open @@SLIDES@@``` -->

]

---

class: in-person

## Where are we going to run our containers?

---

class: in-person, pic



---

class: in-person

## You get a cluster of cloud VMs

- Each person gets a private cluster of cloud VMs (not shared with anybody else)

- They'll remain up for the duration of the workshop

- You should have a little card with login+password+IP addresses

- You can automatically SSH from one VM to another

- The nodes have aliases: `node1`, `node2`, etc.

---

class: in-person

## Why don't we run containers locally?

- Installing that stuff can be hard on some machines

  (32 bits CPU or OS... Laptops without administrator access... etc.)

- *"The whole team downloaded all these container images from the WiFi!
  <br/>... and it went great!"* (Literally no-one ever)

- All you need is a computer (or even a phone or tablet!), with:

  - an internet connection

@@ -109,203 +24,18 @@ class: in-person

class: in-person

## SSH clients

- On Linux, OS X, FreeBSD... you are probably all set

- On Windows, get one of these:

  - [putty](http://www.putty.org/)
  - Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH)
  - [Git BASH](https://git-for-windows.github.io/)
  - [MobaXterm](http://mobaxterm.mobatek.net/)

- On Android, [JuiceSSH](https://juicessh.com/)
  ([Play Store](https://play.google.com/store/apps/details?id=com.sonelli.juicessh))
  works pretty well

- Nice-to-have: [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets

---

class: in-person, extra-details

## What is this Mosh thing?

*You don't have to use Mosh or even know about it to follow along.
<br/>
We're just telling you about it because some of us think it's cool!*

- Mosh is "the mobile shell"

- It is essentially SSH over UDP, with roaming features

- It retransmits packets quickly, so it works great even on lossy connections

  (Like hotel or conference WiFi)

- It has intelligent local echo, so it works great even in high-latency connections

  (Like hotel or conference WiFi)

- It supports transparent roaming when your client IP address changes

  (Like when you hop from hotel to conference WiFi)

---

class: in-person, extra-details

## Using Mosh

- To install it: `(apt|yum|brew) install mosh`

- It has been pre-installed on the VMs that we are using

- To connect to a remote machine: `mosh user@host`

  (It is going to establish an SSH connection, then hand off to UDP)

- It requires UDP ports to be open

  (By default, it uses a UDP port between 60000 and 61000)
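A minimal usage sketch (the login and host names are the ones used in this workshop; the pinned port is an example):

```bash
# Connect just like with SSH:
mosh docker@node1
# If only one UDP port is reachable through a firewall, pin the server port:
mosh -p 60001 docker@node1
```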
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Connecting to our lab environment
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the first VM (`node1`) with your SSH client
|
||||
|
||||
<!--
|
||||
```bash
|
||||
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
|
||||
ssh -o StrictHostKeyChecking=no $N true
|
||||
done
|
||||
```
|
||||
|
||||
```bash
|
||||
if which kubectl; then
|
||||
kubectl get deploy,ds -o name | xargs -rn1 kubectl delete
|
||||
kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete --ignore-not-found=true
|
||||
kubectl -n kube-system get deploy,svc -o name | grep -v dns | xargs -rn1 kubectl -n kube-system delete
|
||||
fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Check that you can SSH (without password) to `node2`:
|
||||
```bash
|
||||
ssh node2
|
||||
```
|
||||
- Type `exit` or `^D` to come back to `node1`
|
||||
|
||||
<!-- ```bash exit``` -->
|
||||
|
||||
]
|
||||
|
||||
If anything goes wrong — ask for help!
|
||||
|
||||
---
|
||||
|
||||
## Doing or re-doing the workshop on your own?
|
||||
|
||||
- Use something like
|
||||
[Play-With-Docker](http://play-with-docker.com/) or
|
||||
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
|
||||
|
||||
Zero setup effort; but environment are short-lived and
|
||||
might have limited resources
|
||||
|
||||
- Create your own cluster (local or cloud VMs)
|
||||
|
||||
Small setup effort; small cost; flexible environments
|
||||
|
||||
- Create a bunch of clusters for you and your friends
|
||||
([instructions](https://@@GITREPO@@/tree/master/prepare-vms))
|
||||
|
||||
Bigger setup effort; ideal for group training

---

class: self-paced

## Get your own Docker nodes

- If you already have some Docker nodes: great!

- If not: let's get some, thanks to Play-With-Docker

.exercise[

- Go to http://www.play-with-docker.com/

- Log in

- Create your first node

<!-- ```open http://www.play-with-docker.com/``` -->

]

You will need a Docker ID to use Play-With-Docker.

(Creating a Docker ID is free.)

---

## We will (mostly) interact with node1 only

*These remarks apply only when using multiple nodes, of course.*

- Unless instructed, **all commands must be run from the first VM, `node1`**

- We will only check out/copy the code on `node1`

- During normal operations, we do not need access to the other nodes

- If we had to troubleshoot issues, we would use a combination of:

  - SSH (to access system logs, daemon status...)

  - Docker API (to check running containers and container engine status; see the sketch below)
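For instance, if the Docker Engine on another node listened on TCP (an assumption; that depends on how the daemon was configured), we could query it remotely (hypothetical host and port):

```bash
# Ask node2's engine for its running containers and status over the API
docker -H tcp://node2:2375 ps
docker -H tcp://node2:2375 info
```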

---

## Terminals

Once in a while, the instructions will say:
<br/>"Open a new terminal."

There are multiple ways to do this:

- create a new window or tab on your machine, and SSH into the VM;

- use screen or tmux on the VM and open a new window from there.

You are welcome to use whichever method you feel most comfortable with.

---

## Tmux cheatsheet

[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.

*You don't have to use it or even know about it to follow along.
<br/>
But some of us like to use it to switch between terminals.
<br/>
It has been preinstalled on your workshop nodes.*

- Ctrl-b c → create a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right
- Ctrl-b Alt-1 → rearrange windows in columns
- Ctrl-b Alt-2 → rearrange windows in rows
- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → reattach to session
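A minimal session, if you want to give it a spin (all standard tmux commands):

```bash
tmux          # start a new session
              # ...Ctrl-b c opens a second window, Ctrl-b d detaches...
tmux attach   # later: reattach and pick up where you left off
```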

---

class: extra-details

## Compose file format version

*Particularly relevant if you have used Compose before...*

- Compose 1.6 introduced support for a new Compose file format (aka "v2")

- Services are no longer at the top level, but under a `services` section

- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)

- Containers are placed on a dedicated network, making links unnecessary

- There are other minor differences, but upgrading is straightforward (see the example below)
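Here is what that structure looks like (a minimal, hypothetical example; the service names and images are made up):

```yaml
version: "2"     # a string, not a number

services:        # services now live under this key
  web:
    image: nginx
  database:
    image: redis
```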

---

## Service discovery in container-land

- We do not hard-code IP addresses in the code

- We do not hard-code FQDNs in the code, either

- We just connect to a service name, and container-magic does the rest

  (And by container-magic, we mean "a crafty, dynamic, embedded DNS server")

---

## Example in `worker/worker.py`

```python
import requests
from redis import Redis

redis = Redis("`redis`")


def get_random_bytes():
    r = requests.get("http://`rng`/32")
    return r.content


def hash_bytes(data):
    r = requests.post("http://`hasher`/",
                      data=data,
                      headers={"Content-Type": "application/octet-stream"})
```

(Full source code available [here](
https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17
))

---

class: extra-details

## Links, naming, and service discovery

- Containers can have network aliases (resolvable through DNS)

- Compose file version 2+ makes each container reachable through its service name

- Compose file version 1 required `links` sections instead

- Network aliases are automatically namespaced (see the sketch after this list)

  - you can have multiple apps declaring and using a service named `database`

  - containers in the blue app will resolve `database` to the IP of the blue database

  - containers in the green app will resolve `database` to the IP of the green database
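You can reproduce this namespacing with plain Docker commands (a sketch; the network and image names are arbitrary):

```bash
# Two apps, each on its own network, each with a "database" alias
docker network create blue
docker network create green
docker run -d --net blue  --net-alias database redis
docker run -d --net green --net-alias database nginx

# On the blue network, "database" resolves to the redis container
docker run --rm --net blue alpine nslookup database
```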

---

## What's this application?

--

---

class: extra-details

## Why does the speed seem irregular?

- It *looks like* the speed is approximately 4 hashes/second

- Or more precisely: 4 hashes/second, with regular dips down to zero

- Why?

--

class: extra-details

- The app actually has a constant, steady speed: 3.33 hashes/second
  <br/>
  (which corresponds to 1 hash every 0.3 seconds, for *reasons*)

- Yes, and?

---

class: extra-details

## The reason why this graph is *not awesome*

- The worker doesn't update the counter after every loop, but up to once per second

- The speed is computed by the browser, checking the counter about once per second

- Between two consecutive updates, the counter will increase either by 4, or by 0

- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - 0 etc.

- What can we conclude from this?

--

class: extra-details

- "I'm clearly incapable of writing good frontend code!" 😀 — Jérôme

---

## Stopping the application

- If we interrupt Compose (with `^C`), it will politely ask the Docker Engine to stop the app

---

class: title, in-person

# Thank you!

- The clusters will be shut down tonight

- If you like:

  - [rate this tutorial on the Velocity website](https://conferences.oreilly.com/velocity/vl-eu/public/schedule/evaluate/71149?eval=71149)

  - [tweet about what you learned](https://twitter.com/intent/tweet?url=https%3A%2F%2Fcontainer.training&text=Learning%20k8s%20with%20@bridgetkromhout%21&hashtags=VelocityConf), mentioning @bridgetkromhout and #VelocityConf

  - [questions, comments, pull requests, workshop invitations, etc](https://github.com/jpetazzo/container.training/)

- You can find more content on http://container.training/

  (More slides, videos, dates of upcoming workshops and tutorials...)

- If you want me to train your team:
  [contact me!](https://docs.google.com/forms/d/e/1FAIpQLScm2evHMvRU8C5ZK59l8FGsLY_Kkup9P_GHgjfByUMyMpMmDA/viewform)

  (This workshop is also available as longer training sessions, covering advanced topics)

.footnote[*Thank you!*]

---

class: title, in-person

@@TITLE@@<br/><br/>

WiFi: OReillyCon<br>
Password: oreilly18

<br>
<br>

.footnote[
**Be kind to the WiFi!**<br/>

<title>@@TITLE@@</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="stylesheet" href="workshop.css">
<link rel="stylesheet" href="override.css">
</head>
<body>
<!--