Mirror of https://github.com/jpetazzo/container.training.git
(synced 2026-03-02 17:30:20 +00:00)

Compare commits: `jpetazzo-l...` → `kube-2019-...` (79 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 99b8886c3e | |
| | 13164971ca | |
| | c55ce66751 | |
| | e280cec60f | |
| | e1ccad3ee2 | |
| | 83535c3f69 | |
| | c8047897e7 | |
| | 869f46060a | |
| | 9ece59e821 | |
| | 258c134421 | |
| | c6d9edbf12 | |
| | cd7a5520fb | |
| | c1757160d7 | |
| | 5fc62e8fd7 | |
| | 4f8ffcdd97 | |
| | f207adfe13 | |
| | a66b295d06 | |
| | ae04e02519 | |
| | 7b03961182 | |
| | 8c2107fba9 | |
| | d4096e9c21 | |
| | 5c89738ab6 | |
| | 893a84feb7 | |
| | f807964416 | |
| | 2ea9cbb00f | |
| | 8cd9a314d3 | |
| | ede085cf48 | |
| | bc349d6c4d | |
| | 80d6b57697 | |
| | 5c2599a2b9 | |
| | a6f6ff161d | |
| | 6aaa8fab75 | |
| | 01042101a2 | |
| | a1adbb66c8 | |
| | 3212561c89 | |
| | 003a232b79 | |
| | 2770da68cd | |
| | c502d019ff | |
| | a07e50ecf8 | |
| | 46c6866ce9 | |
| | fe95318108 | |
| | 65232f93ba | |
| | 9fa7b958dc | |
| | a95e5c960e | |
| | 5b87162e95 | |
| | 8c4914294e | |
| | 7b9b9f527d | |
| | 3c7f39747c | |
| | be67a742ee | |
| | 556db65251 | |
| | ff781a3065 | |
| | 8348d750df | |
| | 9afa0acbf9 | |
| | cb624755e4 | |
| | 523ca55831 | |
| | f0b48935fa | |
| | 33e1bfd8be | |
| | 2efc29991e | |
| | 11387f1330 | |
| | fe93dccbac | |
| | 5fad84a7cf | |
| | 22dd6b4e70 | |
| | a3594e7e1e | |
| | 7f74e5ce32 | |
| | 9e051abb32 | |
| | 3ebcfd142b | |
| | 6c5d049c4c | |
| | 072ba44cba | |
| | bc8a9dc4e7 | |
| | b1ba881eee | |
| | 337a5d94ed | |
| | 43acccc0af | |
| | 4a447c7bf5 | |
| | b9de73d0fd | |
| | 3f7675be04 | |
| | 9a6160ba1f | |
| | f23272d154 | |
| | f01bc2a7a9 | |
| | 3eaa844c55 | |
@@ -199,7 +199,7 @@ this section is for you!
  locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
  can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
-  prevent you from loosing your place if you get disconnected from servers.
+  prevent you from losing your place if you get disconnected from servers.
  https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IP's.
- Forget to have fun and focus on your students!

@@ -5,6 +5,3 @@ RUN gem install thin
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
-HEALTHCHECK \
-  --interval=1s --timeout=2s --retries=3 --start-period=1s \
-  CMD curl http://localhost/ || exit 1
@@ -2,14 +2,14 @@ version: "2"

services:
  elasticsearch:
-    image: elasticsearch
+    image: elasticsearch:2
    # If you need to access ES directly, just uncomment those lines.
    #ports:
    #  - "9200:9200"
    #  - "9300:9300"

  logstash:
-    image: logstash
+    image: logstash:2
    command: |
      -e '
      input {

@@ -47,7 +47,7 @@ services:
      - "12201:12201/udp"

  kibana:
-    image: kibana
+    image: kibana:4
    ports:
      - "5601:5601"
    environment:
@@ -1,3 +1,37 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: consul
+  labels:
+    app: consul
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: consul
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: consul
+subjects:
+  - kind: ServiceAccount
+    name: consul
+    namespace: default
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul
+  labels:
+    app: consul
+---
apiVersion: v1
kind: Service
metadata:

@@ -24,6 +58,7 @@ spec:
      labels:
        app: consul
    spec:
+      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:

@@ -37,18 +72,11 @@ spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: consul
-          image: "consul:1.2.2"
-          env:
-            - name: NAMESPACE
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.namespace
+          image: "consul:1.4.0"
          args:
            - "agent"
            - "-bootstrap-expect=3"
-            - "-retry-join=consul-0.consul.$(NAMESPACE).svc.cluster.local"
-            - "-retry-join=consul-1.consul.$(NAMESPACE).svc.cluster.local"
-            - "-retry-join=consul-2.consul.$(NAMESPACE).svc.cluster.local"
+            - "-retry-join=provider=k8s label_selector=\"app=consul\""
            - "-client=0.0.0.0"
            - "-data-dir=/consul/data"
            - "-server"

@@ -132,6 +132,9 @@ spec:
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
+        env:
+          - name: ES_JAVA_OPTS
+            value: "-Xms1g -Xmx1g"
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
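To double-check that the new RBAC objects behave as intended, here is a minimal sketch (assuming the manifests above are applied in the `default` namespace):

```bash
# Should print "yes": the consul service account is now allowed to
# get/list pods, which the "-retry-join=provider=k8s" mode relies on.
kubectl auth can-i list pods \
        --as=system:serviceaccount:default:consul
```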
@@ -5,7 +5,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
-      run: testweb
+      app: testweb
  ingress:
    - from:
        - podSelector:

@@ -5,6 +5,6 @@ metadata:
spec:
  podSelector:
    matchLabels:
-      run: testweb
+      app: testweb
  ingress: []
@@ -16,7 +16,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
-      run: webui
+      app: webui
  ingress:
    - from: []

@@ -6,7 +6,7 @@ metadata:
  creationTimestamp: null
  generation: 1
  labels:
-    run: socat
+    app: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
@@ -14,7 +14,7 @@ spec:
  replicas: 1
  selector:
    matchLabels:
-      run: socat
+      app: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
@@ -24,7 +24,7 @@ spec:
    metadata:
      creationTimestamp: null
      labels:
-        run: socat
+        app: socat
    spec:
      containers:
      - args:
@@ -49,7 +49,7 @@ kind: Service
metadata:
  creationTimestamp: null
  labels:
-    run: socat
+    app: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
@@ -60,7 +60,7 @@ spec:
    protocol: TCP
    targetPort: 80
  selector:
-    run: socat
+    app: socat
  sessionAffinity: None
  type: NodePort
status:
@@ -32,7 +32,7 @@ Virtualbox, Vagrant and Ansible

    $ source path/to/your-ansible-clone/hacking/env-setup

-- you need to repeat the last step everytime you open a new terminal session
+- you need to repeat the last step every time you open a new terminal session
  and want to use any Ansible command (but you'll probably only need to run
  it once).
@@ -54,6 +54,9 @@ need_infra() {
    if [ -z "$1" ]; then
        die "Please specify infrastructure file. (e.g.: infra/aws)"
    fi
+    if [ "$1" = "--infra" ]; then
+        die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
+    fi
    if [ ! -f "$1" ]; then
        die "Infrastructure file $1 doesn't exist."
    fi
@@ -170,7 +170,8 @@ EOF"
# Install stern
pssh "
if [ ! -x /usr/local/bin/stern ]; then
-  sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64 &&
+  ##VERSION##
+  sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
  sudo chmod +x /usr/local/bin/stern &&
  stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
@@ -400,6 +401,28 @@ _cmd_test() {
    test_tag
}

+_cmd helmprom "Install Helm and Prometheus"
+_cmd_helmprom() {
+    TAG=$1
+    need_tag
+    pssh "
+    if grep -q node1 /tmp/node; then
+        kubectl -n kube-system get serviceaccount helm ||
+        kubectl -n kube-system create serviceaccount helm
+        helm init --service-account helm
+        kubectl get clusterrolebinding helm-can-do-everything ||
+        kubectl create clusterrolebinding helm-can-do-everything \
+            --clusterrole=cluster-admin \
+            --serviceaccount=kube-system:helm
+        helm upgrade --install prometheus stable/prometheus \
+            --namespace kube-system \
+            --set server.service.type=NodePort \
+            --set server.service.nodePort=30090 \
+            --set server.persistentVolume.enabled=false \
+            --set alertmanager.enabled=false
+    fi"
+}
+
# Sometimes, weave fails to come up on some nodes.
# Symptom: the pods on a node are unreachable (they don't even ping).
# Remedy: wipe out Weave state and delete weave pod on that node.
@@ -201,5 +201,6 @@ aws_tag_instances() {
}

aws_get_ami() {
-    find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q
+    ##VERSION##
+    find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
}
prepare-vms/settings/jerome.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
# Number of VMs per cluster
clustersize: 4

# Jinja2 template to use to generate ready-to-cut cards
cards_template: jerome.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0

# Password used to connect with the "docker user"
docker_user_password: training
prepare-vms/templates/jerome.html (new file, 131 lines)
@@ -0,0 +1,131 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://qconsf2018.container.training/" -%}
{%- set pagesize = 9 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
body, table {
  margin: 0;
  padding: 0;
  line-height: 1.0em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  height: 31%;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 30%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

div.back {
  border: 1px dotted white;
}

div.back p {
  margin: 0.5em 1em 0 1em;
}

p {
  margin: 0.4em 0 0.8em 0;
}

img {
  height: 5em;
  float: right;
  margin-right: 1em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
<div>

<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% for x in range(pagesize) %}
<div class="back">
<br/>
<p>You got this card at the workshop "Getting Started With Kubernetes and Container Orchestration"
during QCON San Francisco (November 2018).</p>
<p>That workshop was a 1-day version of a longer curriculum.</p>
<p>If you liked that workshop, the instructor (Jérôme Petazzoni) can deliver it
(or the longer version) to your team or organization.</p>
<p>You can reach him at:</p>
<p>jerome.petazzoni@gmail.com</p>
<p>Thank you!</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endfor %}
</body>
</html>
slides/_redirects (new file, 1 line)
@@ -0,0 +1 @@
/ /kube-twodays.yml.html 200!
@@ -156,6 +156,36 @@ Different deployments will use different underlying technologies.

---

+## Service meshes
+
+* A service mesh is a configurable network layer.
+
+* It can provide service discovery, high availability, load balancing, observability...
+
+* Service meshes are particularly useful for microservices applications.
+
+* Service meshes are often implemented as proxies.
+
+* Applications connect to the service mesh, which relays the connection where needed.
+
+*Does that sound familiar?*
+
+---
+
+## Ambassadors and service meshes
+
+* When using a service mesh, a "sidecar container" is often used as a proxy
+
+* Our services connect (transparently) to that sidecar container
+
+* That sidecar container figures out where to forward the traffic
+
+... Does that sound familiar?
+
+(It should, because service meshes are essentially app-wide or cluster-wide ambassadors!)
+
+---
+
## Section summary

We've learned how to:
@@ -168,3 +198,10 @@ For more information about the ambassador pattern, including demos on Swarm and

* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)

+Some service meshes and related projects:
+
+* [Istio](https://istio.io/)
+
+* [Linkerd](https://linkerd.io/)
+
+* [Gloo](https://gloo.solo.io/)
@@ -36,7 +36,7 @@ docker run jpetazzo/hamba 80 www1:80 www2:80

* Appropriate for mandatory parameters (without which the service cannot start).

-* Convenient for "toolbelt" services instanciated many times.
+* Convenient for "toolbelt" services instantiated many times.

  (Because there is no extra step: just run it!)

@@ -63,7 +63,7 @@ docker run -e ELASTICSEARCH_URL=http://es42:9201/ kibana

* Appropriate for optional parameters (since the image can provide default values).

-* Also convenient for services instanciated many times.
+* Also convenient for services instantiated many times.

  (It's as easy as command-line parameters.)
@@ -144,6 +144,10 @@ At a first glance, it looks like this would be particularly useful in scripts.
However, if we want to start a container and get its ID in a reliable way,
it is better to use `docker run -d`, which we will cover in a bit.

+(Using `docker ps -lq` is prone to race conditions: what happens if someone
+else, or another program or script, starts another container just before
+we run `docker ps -lq`?)
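To make the difference concrete, here is a small sketch contrasting the two approaches:

```bash
# Racy: if anything else starts a container between these two commands,
# we capture the wrong ID.
docker run -d nginx
CID=$(docker ps -lq)

# Reliable: `docker run -d` prints the ID of the container it just
# started, so we can capture it directly.
CID=$(docker run -d nginx)
echo "Started container: $CID"
```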
---

## View the logs of a container
@@ -78,7 +78,7 @@ First step: clone the source code for the app we will be working on.

```bash
$ cd
-$ git clone git://github.com/jpetazzo/trainingwheels
+$ git clone https://github.com/jpetazzo/trainingwheels
...
$ cd trainingwheels
```
@@ -67,7 +67,8 @@ The following list is not exhaustive.

Furthermore, we limited the scope to Linux containers.

-Containers also exist (sometimes with other names) on Windows, macOS, Solaris, FreeBSD ...
+We can also find containers (or things that look like containers) on other platforms
+like Windows, macOS, Solaris, FreeBSD ...

---

@@ -155,6 +156,36 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).

---

+## Kata containers
+
+* OCI-compliant runtime.
+
+* Fusion of two projects: Intel Clear Containers and Hyper runV.
+
+* Run each container in a lightweight virtual machine.
+
+* Requires to run on bare metal *or* with nested virtualization.
+
+---
+
+## gVisor
+
+* OCI-compliant runtime.
+
+* Implements a subset of the Linux kernel system calls.
+
+* Written in go, uses a smaller subset of system calls.
+
+* Can be heavily sandboxed.
+
+* Can run in two modes:
+
+  * KVM (requires bare metal or nested virtualization),
+
+  * ptrace (no requirement, but slower).
+
+---
+
## Overall ...

* The Docker Engine is very developer-centric:

@@ -174,4 +205,3 @@ We're not aware of anyone using it directly (i.e. outside of Kubernetes).
- Docker is a good default choice

- If you use Kubernetes, the engine doesn't matter
@@ -721,3 +721,20 @@ eth0      Link encap:Ethernet  HWaddr 02:42:AC:15:00:03
...
```
]

+---
+
+class: extra-details
+
+## Building with a custom network
+
+* We can build a Dockerfile with a custom network with `docker build --network NAME`.
+
+* This can be used to check that a build doesn't access the network.
+
+  (But keep in mind that most Dockerfiles will fail,
+  <br/>because they need to install remote packages and dependencies!)
+
+* This may be used to access an internal package repository.
+
+  (But try to use a multi-stage build instead, if possible!)
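As an illustration, a minimal sketch (the image and network names are placeholders):

```bash
# Build with networking disabled: any RUN step that tries to reach
# the network (apt-get, pip, curl...) will fail.
docker build --network none -t myimage:offline .

# Build attached to a user-defined network, e.g. one that can reach
# an internal package mirror.
docker network create buildnet
docker build --network buildnet -t myimage:internal .
```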
@@ -169,5 +169,5 @@ Would we give the same answers to the questions on the previous slide?

class: pic

-[image]
+[image]
@@ -66,14 +66,6 @@ class: pic

---

-class: pic
-
-## Multiple containers sharing the same image
-
-[image]
-
----
-
## Differences between containers and images

* An image is a read-only filesystem.
@@ -88,6 +80,14 @@ class: pic

---

+class: pic
+
+## Multiple containers sharing the same image
+
+[image]
+
+---
+
## Comparison with object-oriented programming

* Images are conceptually similar to *classes*.
@@ -118,7 +118,7 @@ If an image is read-only, how do we change it?

* The only way to create an image is by "freezing" a container.

-* The only way to create a container is by instanciating an image.
+* The only way to create a container is by instantiating an image.

* Help!
@@ -216,7 +216,7 @@ clock

---

-## Self-Hosted namespace
+## Self-hosted namespace

This namespace holds images which are not hosted on Docker Hub, but on third
party registries.
@@ -233,6 +233,13 @@ localhost:5000/wordpress
* `localhost:5000` is the host and port of the registry
* `wordpress` is the name of the image

+Other examples:
+
+```bash
+quay.io/coreos/etcd
+gcr.io/google-containers/hugo
+```
+
---

## How do you store and manage images?
@@ -352,6 +359,8 @@ Do specify tags:

* To ensure that the same version will be used everywhere.
* To ensure repeatability later.

+This is similar to what we would do with `pip install`, `npm install`, etc.
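For instance (the version numbers below are arbitrary examples):

```bash
# Unpinned: whatever "latest" happens to be today
docker pull nginx

# Pinned: the same image version everywhere, now and later
docker pull nginx:1.15.8

# The same idea with language package managers
pip install "requests==2.21.0"
npm install express@4.16.4
```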
---

## Section summary
@@ -194,9 +194,13 @@ will have equal success with Fluent or other logging stacks!*

- We are going to use a Compose file describing the ELK stack.

+- The Compose file is in the container.training repository on GitHub.
+
  ```bash
-  $ cd ~/container.training/stacks
-  $ docker-compose -f elk.yml up -d
+  $ git clone https://github.com/jpetazzo/container.training
+  $ cd container.training
+  $ cd elk
+  $ docker-compose up
  ```

- Let's have a look at the Compose file while it's deploying.
@@ -293,3 +293,23 @@ We can achieve even smaller images if we use smaller base images.
However, if we use common base images (e.g. if we standardize on `ubuntu`),
these common images will be pulled only once per node, so they are
virtually "free."

+---
+
+## Build targets
+
+* We can also tag an intermediary stage with `docker build --target STAGE --tag NAME`
+
+* This will create an image (named `NAME`) corresponding to stage `STAGE`
+
+* This can be used to easily access an intermediary stage for inspection
+
+  (Instead of parsing the output of `docker build` to find out the image ID)
+
+* This can also be used to describe multiple images from a single Dockerfile
+
+  (Instead of using multiple Dockerfiles, which could go out of sync)
+
+* Sometimes, we want to inspect a specific intermediary build stage.
+
+* Or, we want to describe multiple images using a single Dockerfile.
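As a sketch, assuming a multi-stage Dockerfile with a stage named `builder`:

```bash
# Given a Dockerfile along the lines of:
#   FROM golang AS builder
#   ...
#   FROM alpine
#   ...
# we can build and tag just the "builder" stage:
docker build --target builder --tag myapp:builder .

# ...and then inspect it interactively:
docker run -ti myapp:builder sh
```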
@@ -243,58 +243,76 @@ Scheduling = deciding which hypervisor to use for each VM.

---

class: pic

## Scheduling with one resource

.center[]

-Can we do better?
+## Can we do better?

---

class: pic

## Scheduling with one resource

.center[]

-Yup!
+## Yup!

---

class: pic

## Scheduling with two resources

.center[]

---

class: pic

## Scheduling with three resources

.center[]

---

class: pic

## You need to be good at this

.center[]

---

class: pic

## But also, you must be quick!

.center[]

---

class: pic

## And be web scale!

.center[]

---

class: pic

## And think outside (?) of the box!

.center[]

---

class: pic

## Good luck!

.center[]
@@ -91,12 +91,12 @@ class: extra-details

* We need a Dockerized repository!
* Let's go to https://github.com/jpetazzo/trainingwheels and fork it.
-* Go to the Docker Hub (https://hub.docker.com/).
-* Select "Create" in the top-right bar, and select "Create Automated Build."
+* Go to the Docker Hub (https://hub.docker.com/) and sign-in. Select "Repositories" in the blue navigation menu.
+* Select "Create" in the top-right bar, and select "Create Repository+".
* Connect your Docker Hub account to your GitHub account.
-* Select your user and the repository that we just forked.
-* Create.
-* Then go to "Build Settings."
-* Put `/www` in "Dockerfile Location" (or whichever directory the Dockerfile is in).
-* Click "Trigger" to build the repository immediately (without waiting for a git push).
+* Click "Create" button.
+* Then go to "Builds" folder.
+* Click on Github icon and select your user and the repository that we just forked.
+* In "Build rules" block near page bottom, put `/www` in "Build Context" column (or whichever directory the Dockerfile is in).
+* Click "Save and Build" to build the repository immediately (without waiting for a git push).
* Subsequent builds will happen automatically, thanks to GitHub hooks.
@@ -91,7 +91,7 @@ $ ssh <login>@<ip-address>

* Git BASH (https://git-for-windows.github.io/)

-* MobaXterm (http://moabaxterm.mobatek.net)
+* MobaXterm (https://mobaxterm.mobatek.net/)

---
@@ -121,7 +121,7 @@ Nano and LinuxKit VMs in Hyper-V!)

- you should still use `--platform` with multi-os images to be certain

-- Windows Containers now support `localhost` accessable containers (July 2018)
+- Windows Containers now support `localhost` accessible containers (July 2018)

- Microsoft (April 2018) added Hyper-V support to Windows 10 Home ...
@@ -157,8 +157,8 @@ Places to Look:

- Good Windows Container Blogs and How-To's

-  - Dockers DevRel [Elton Stoneman, Microsoft MVP](https://blog.sixeyed.com/)
+  - Docker DevRel [Elton Stoneman, Microsoft MVP](https://blog.sixeyed.com/)

-  - Docker Captian [Nicholas Dille](https://dille.name/blog/)
+  - Docker Captain [Nicholas Dille](https://dille.name/blog/)

  - Docker Captain [Stefan Scherer](https://stefanscherer.github.io/)
@@ -30,7 +30,7 @@ class: self-paced

- These slides include *tons* of exercises and examples

-- They assume that you have acccess to a machine running Docker
+- They assume that you have access to a machine running Docker

- If you are attending a workshop or tutorial:
  <br/>you will be given specific instructions to access a cloud VM
@@ -4,6 +4,7 @@ TEMPLATE="""<html>
<head>
<title>{{ title }}</title>
<link rel="stylesheet" href="index.css">
+<meta charset="UTF-8">
</head>
<body>
<div class="main">
@@ -106,26 +107,41 @@ import yaml

items = yaml.load(open("index.yaml"))

+# Items with a date correspond to scheduled sessions.
+# Items without a date correspond to self-paced content.
+# The date should be specified as a string (e.g. 2018-11-26).
+# It can also be a list of two elements (e.g. [2018-11-26, 2018-11-28]).
+# The latter indicates an event spanning multiple dates.
+# The first date will be used in the generated page, but the event
+# will be considered "current" (and therefore, shown in the list of
+# upcoming events) until the second date.
+
for item in items:
    if "date" in item:
        date = item["date"]
+        if type(date) == list:
+            date_begin, date_end = date
+        else:
+            date_begin, date_end = date, date
        suffix = {
            1: "st", 2: "nd", 3: "rd",
            21: "st", 22: "nd", 23: "rd",
-            31: "st"}.get(date.day, "th")
+            31: "st"}.get(date_begin.day, "th")
        # %e is a non-standard extension (it displays the day, but without a
        # leading zero). If strftime fails with ValueError, try to fall back
        # on %d (which displays the day but with a leading zero when needed).
        try:
-            item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
+            item["prettydate"] = date_begin.strftime("%B %e{}, %Y").format(suffix)
        except ValueError:
-            item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)
+            item["prettydate"] = date_begin.strftime("%B %d{}, %Y").format(suffix)
+        item["begin"] = date_begin
+        item["end"] = date_end

today = datetime.date.today()
-coming_soon = [i for i in items if i.get("date") and i["date"] >= today]
-coming_soon.sort(key=lambda i: i["date"])
-past_workshops = [i for i in items if i.get("date") and i["date"] < today]
-past_workshops.sort(key=lambda i: i["date"], reverse=True)
+coming_soon = [i for i in items if i.get("date") and i["end"] >= today]
+coming_soon.sort(key=lambda i: i["begin"])
+past_workshops = [i for i in items if i.get("date") and i["end"] < today]
+past_workshops.sort(key=lambda i: i["begin"], reverse=True)
self_paced = [i for i in items if not i.get("date")]
recorded_workshops = [i for i in items if i.get("video")]
@@ -1,3 +1,37 @@
+- date: 2019-04-28
+  country: us
+  city: Chicago, IL
+  event: GOTO
+  speaker: jpetazzo
+  title: Getting Started With Kubernetes and Container Orchestration
+  attend: https://gotochgo.com/2019/workshops/148
+
+- date: 2019-03-07
+  country: uk
+  city: London
+  event: QCON
+  speaker: jpetazzo
+  title: Getting Started With Kubernetes and Container Orchestration
+  attend: https://qconlondon.com/london2019/workshop/getting-started-kubernetes-and-container-orchestration
+
+- date: [2019-01-07, 2019-01-08]
+  country: fr
+  city: Paris
+  event: ENIX SAS
+  speaker: "jpetazzo, alexbuisine"
+  title: Bien démarrer avec les conteneurs (in French)
+  lang: fr
+  attend: https://enix.io/fr/services/formation/bien-demarrer-avec-les-conteneurs/
+
+- date: [2018-12-17, 2018-12-18]
+  country: fr
+  city: Paris
+  event: ENIX SAS
+  speaker: "jpetazzo, rdegez"
+  title: Déployer ses applications avec Kubernetes (in French)
+  lang: fr
+  attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
+
- date: 2018-11-08
  city: San Francisco, CA
  country: us
@@ -13,6 +47,7 @@
  title: Getting Started With Kubernetes and Container Orchestration
  speaker: jpetazzo
  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration-thursday-section
+  slides: http://qconsf2018.container.training/

- date: 2018-11-09
  city: San Francisco, CA
@@ -21,6 +56,7 @@
  title: Getting Started With Kubernetes and Container Orchestration
  speaker: jpetazzo
  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration-friday-section
+  slides: http://qconsf2018.container.training/

- date: 2018-10-31
  city: London, UK
@@ -133,6 +133,8 @@ class: extra-details

→ We are user `kubernetes-admin`, in group `system:masters`.

+(We will see later how and why this gives us the permissions that we have.)
+
---

## User certificates in practice
@@ -538,7 +540,7 @@ It's important to note a couple of details in these flags ...

- But that we can't create things:
  ```
-  ./kubectl run tryme --image=nginx
+  ./kubectl create deployment testrbac --image=nginx
  ```

- Exit the container with `exit` or `^D`
@@ -567,3 +569,45 @@ It's important to note a couple of details in these flags ...
  kubectl auth can-i list nodes \
          --as system:serviceaccount:<namespace>:<name-of-service-account>
  ```

+---
+
+class: extra-details
+
+## Where do our permissions come from?
+
+- When interacting with the Kubernetes API, we are using a client certificate
+
+- We saw previously that this client certificate contained:
+
+  `CN=kubernetes-admin` and `O=system:masters`
+
+- Let's look for these in existing ClusterRoleBindings:
+  ```bash
+  kubectl get clusterrolebindings -o yaml |
+    grep -e kubernetes-admin -e system:masters
+  ```
+
+  (`system:masters` should show up, but not `kubernetes-admin`.)
+
+- Where does this match come from?
+
+---
+
+class: extra-details
+
+## The `system:masters` group
+
+- If we eyeball the output of `kubectl get clusterrolebindings -o yaml`, we'll find out!
+
+- It is in the `cluster-admin` binding:
+  ```bash
+  kubectl describe clusterrolebinding cluster-admin
+  ```
+
+- This binding associates `system:masters` to the cluster role `cluster-admin`
+
+- And the `cluster-admin` is, basically, `root`:
+  ```bash
+  kubectl describe clusterrole cluster-admin
+  ```
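One way to verify those certificate fields ourselves, sketched under the assumption that the kubeconfig embeds the certificate as `client-certificate-data` (some setups reference a file path instead):

```bash
# Decode the first user's client certificate and print its subject.
kubectl config view --raw \
  -o jsonpath='{.users[0].user.client-certificate-data}' \
  | base64 -d | openssl x509 -noout -subject
# Expected output similar to:
#   subject=O = system:masters, CN = kubernetes-admin
```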
@@ -252,38 +252,29 @@ The master node has [taints](https://kubernetes.io/docs/concepts/configuration/t

---

-## What are all these pods doing?
+## Is this working?

-- Let's check the logs of all these `rng` pods
-
-- All these pods have a `run=rng` label:
-
-  - the first pod, because that's what `kubectl run` does
-  - the other ones (in the daemon set), because we
-    *copied the spec from the first one*
-
-- Therefore, we can query everybody's logs using that `run=rng` selector
-
-.exercise[
-
-- Check the logs of all the pods having a label `run=rng`:
-  ```bash
-  kubectl logs -l run=rng --tail 1
-  ```
-
-]
+- Look at the web UI

--

-It appears that *all the pods* are serving requests at the moment.
+- The graph should now go above 10 hashes per second!

--

+- It looks like the newly created pods are serving traffic correctly
+
+- How and why did this happen?
+
+  (We didn't do anything special to add them to the `rng` service load balancer!)
+
---

-## The magic of selectors
+# Labels and selectors

- The `rng` *service* is load balancing requests to a set of pods

-- This set of pods is defined as "pods having the label `run=rng`"
+- That set of pods is defined by the *selector* of the `rng` service

.exercise[
@@ -294,110 +285,333 @@ It appears that *all the pods* are serving requests at the moment.

]

-When we created additional pods with this label, they were
-automatically detected by `svc/rng` and added as *endpoints*
-to the associated load balancer.
+- The selector is `app=rng`
+
+- It means "all the pods having the label `app=rng`"
+
+  (They can have additional labels as well, that's OK!)

---

-## Removing the first pod from the load balancer
+## Selector evaluation

+- We can use selectors with many `kubectl` commands
+
+- For instance, with `kubectl get`, `kubectl logs`, `kubectl delete` ... and more
+
.exercise[

- Get the list of pods matching selector `app=rng`:
  ```bash
  kubectl get pods -l app=rng
  kubectl get pods --selector app=rng
  ```

]

But ... why do these pods (in particular, the *new* ones) have this `app=rng` label?

---

## Where do labels come from?

- When we create a deployment with `kubectl create deployment rng`,
  <br/>this deployment gets the label `app=rng`

- The replica sets created by this deployment also get the label `app=rng`

- The pods created by these replica sets also get the label `app=rng`

- When we created the daemon set from the deployment, we re-used the same spec

- Therefore, the pods created by the daemon set get the same labels

.footnote[Note: when we use `kubectl run stuff`, the label is `run=stuff` instead.]

---

## Updating load balancer configuration

- We would like to remove a pod from the load balancer

- What would happen if we removed that pod, with `kubectl delete pod ...`?

--

-The `replicaset` would re-create it immediately.
+It would be re-created immediately (by the replica set or the daemon set)

--

-- What would happen if we removed the `run=rng` label from that pod?
+- What would happen if we removed the `app=rng` label from that pod?

--

-The `replicaset` would re-create it immediately.
+It would *also* be re-created immediately

--

-... Because what matters to the `replicaset` is the number of pods *matching that selector.*
-
-- But but but ... Don't we have more than one pod with `run=rng` now?
-
-The answer lies in the exact selector used by the `replicaset` ...
+Why?!?

---

-## Deep dive into selectors
+## Selectors for replica sets and daemon sets

-- Let's look at the selectors for the `rng` *deployment* and the associated *replica set*
+- The "mission" of a replica set is:
+
+  "Make sure that there is the right number of pods matching this spec!"
+
+- The "mission" of a daemon set is:
+
+  "Make sure that there is a pod matching this spec on each node!"

--

- *In fact,* replica sets and daemon sets do not check pod specifications

- They merely have a *selector*, and they look for pods matching that selector

- Yes, we can fool them by manually creating pods with the "right" labels

- Bottom line: if we remove our `app=rng` label ...

  ... The pod "disappears" for its parent, which re-creates another pod to replace it

---

class: extra-details

## Isolation of replica sets and daemon sets

- Since both the `rng` daemon set and the `rng` replica set use `app=rng` ...

  ... Why don't they "find" each other's pods?

--

- *Replica sets* have a more specific selector, visible with `kubectl describe`

  (It looks like `app=rng,pod-template-hash=abcd1234`)

- *Daemon sets* also have a more specific selector, but it's invisible

  (It looks like `app=rng,controller-revision-hash=abcd1234`)

- As a result, each controller only "sees" the pods it manages
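We can peek at the replica set's effective selector ourselves; a quick sketch:

```bash
# The Selector line shows the extra pod-template-hash label:
kubectl describe rs -l app=rng | grep ^Selector
# Selector:  app=rng,pod-template-hash=...
```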
---

## Removing a pod from the load balancer

- Currently, the `rng` service is defined by the `app=rng` selector

- The only way to remove a pod is to remove or change the `app` label

- ... But that will cause another pod to be created instead!

- What's the solution?

--

- We need to change the selector of the `rng` service!

- Let's add another label to that selector (e.g. `enabled=yes`)

---

## Complex selectors

- If a selector specifies multiple labels, they are understood as a logical *AND*

  (In other words: the pods must match all the labels)

- Kubernetes has support for advanced, set-based selectors

  (But these cannot be used with services, at least not yet!)
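A quick sketch of both styles with `kubectl` (the label values are illustrative):

```bash
# Equality-based; multiple labels are a logical AND:
kubectl get pods -l app=rng,enabled=yes

# Set-based selectors also work with kubectl get
# (but cannot be used in a service's selector):
kubectl get pods -l 'app in (rng, hasher)'
```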
---

## The plan

1. Add the label `enabled=yes` to all our `rng` pods

2. Update the selector for the `rng` service to also include `enabled=yes`

3. Toggle traffic to a pod by manually adding/removing the `enabled` label

4. Profit!

*Note: if we swap steps 1 and 2, it will cause a short
service disruption, because there will be a period of time
during which the service selector won't match any pod.
During that time, requests to the service will time out.
By doing things in the order above, we guarantee that there won't
be any interruption.*

---

## Adding labels to pods

- We want to add the label `enabled=yes` to all pods that have `app=rng`

- We could edit each pod one by one with `kubectl edit` ...

- ... Or we could use `kubectl label` to label them all

- `kubectl label` can use selectors itself

.exercise[

-- Show detailed information about the `rng` deployment:
+- Add `enabled=yes` to all pods that have `app=rng`:
  ```bash
-  kubectl describe deploy rng
+  kubectl label pods -l app=rng enabled=yes
  ```

-- Show detailed information about the `rng` replica:
-  <br/>(The second command doesn't require you to get the exact name of the replica set)

]

---

## Updating the service selector

- We need to edit the service specification

- Reminder: in the service definition, we will see `app: rng` in two places

  - the label of the service itself (we don't need to touch that one)

  - the selector of the service (that's the one we want to change)

.exercise[

- Update the service to add `enabled: yes` to its selector:
  ```bash
-  kubectl describe rs rng-yyyyyyyy
-  kubectl describe rs -l run=rng
+  kubectl edit service rng
  ```

<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noenabled: yes```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->

]

--

-The replica set selector also has a `pod-template-hash`, unlike the pods in our daemon set.
+... And then we get *the weirdest error ever.* Why?

---

-# Updating a service through labels and selectors
+## When the YAML parser is being too smart

-- What if we want to drop the `rng` deployment from the load balancer?
+- YAML parsers try to help us:

-- Option 1:
+  - `xyz` is the string `"xyz"`

-  - destroy it
+  - `42` is the integer `42`

-- Option 2:
+  - `yes` is the boolean value `true`

-  - add an extra *label* to the daemon set
+- If we want the string `"42"` or the string `"yes"`, we have to quote them

-  - update the service *selector* to refer to that *label*
+- So we have to use `enabled: "yes"`

--

-Of course, option 2 offers more learning opportunities. Right?
+.footnote[For a good laugh: if we had used "ja", "oui", "si" ... as the value, it would have worked!]
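To see this "helpfulness" first-hand, a quick sketch (assumes Python with PyYAML available):

```bash
python3 -c 'import yaml; print(yaml.safe_load("enabled: yes"))'
# {'enabled': True}    <- unquoted "yes" became a boolean

python3 -c 'import yaml; print(yaml.safe_load("enabled: \"yes\""))'
# {'enabled': 'yes'}   <- quoting preserves the string
```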
---

-## Add an extra label to the daemon set
+## Updating the service selector, take 2

-- We will update the daemon set "spec"
+.exercise[

-- Option 1:
+- Update the service to add `enabled: "yes"` to its selector:
  ```bash
  kubectl edit service rng
  ```

-  - edit the `rng.yml` file that we used earlier

<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noenabled: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->

-  - load the new definition with `kubectl apply`
+]

-- Option 2:
+This time it should work!

-  - use `kubectl edit`

--

-*If you feel like you got this💕🌈, feel free to try directly.*
-
-*We've included a few hints on the next slides for your convenience!*
+If we did everything correctly, the web UI shouldn't show any change.

---

## Updating labels

- We want to disable the pod that was created by the deployment

- All we have to do is remove the `enabled` label from that pod

- To identify that pod, we can use its name

- ... Or rely on the fact that it's the only one with a `pod-template-hash` label

- Good to know:

  - `kubectl label ... foo=` doesn't remove a label (it sets it to an empty string)

  - to remove label `foo`, use `kubectl label ... foo-`

  - to change an existing label, we would need to add `--overwrite`
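A quick sketch of those three behaviors (`mypod` is a placeholder pod name):

```bash
kubectl label pod mypod enabled=yes              # add a label
kubectl label pod mypod enabled=no --overwrite   # change an existing label
kubectl label pod mypod enabled-                 # remove the label entirely
kubectl label pod mypod enabled=                 # careful: sets it to ""
```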
---

## Removing a pod from the load balancer

.exercise[

- In one window, check the logs of that pod:
  ```bash
  POD=$(kubectl get pod -l app=rng,pod-template-hash -o name)
  kubectl logs --tail 1 --follow $POD
  ```

  (We should see a steady stream of HTTP logs)

- In another window, remove the label from the pod:
  ```bash
  kubectl label pod -l app=rng,pod-template-hash enabled-
  ```

  (The stream of HTTP logs should stop immediately)

]

There might be a slight change in the web UI (since we removed a bit
of capacity from the `rng` service). If we remove more pods,
the effect should be more visible.

---

class: extra-details

## Updating the daemon set

- If we scale up our cluster by adding new nodes, the daemon set will create more pods

- These pods won't have the `enabled=yes` label

- If we want these pods to have that label, we need to edit the daemon set spec

- We can do that with e.g. `kubectl edit daemonset rng`

---

class: extra-details

## We've put resources in your resources

- Reminder: a daemon set is a resource that creates more resources!

@@ -410,7 +624,9 @@ Of course, option 2 offers more learning opportunities. Right?

  - the label(s) of the resource(s) created by the first resource (in the `template` block)

-- You need to update the selector and the template (metadata labels are not mandatory)
+- We would need to update the selector and the template
+
+  (metadata labels are not mandatory)

- The template must match the selector

@@ -418,175 +634,6 @@ Of course, option 2 offers more learning opportunities. Right?

---

-## Adding our label
-
-- Let's add a label `isactive: yes`
-
-- In YAML, `yes` should be quoted; i.e. `isactive: "yes"`
-
-.exercise[
-
-- Update the daemon set to add `isactive: "yes"` to the selector and template label:
-  ```bash
-  kubectl edit daemonset rng
-  ```
-
-<!--
-```wait Please edit the object below```
-```keys /run: rng```
-```keys ^J```
-```keys noisactive: "yes"```
-```keys ^[``` ]
-```keys /run: rng```
-```keys ^J```
-```keys oisactive: "yes"```
-```keys ^[``` ]
-```keys :wq```
-```keys ^J```
--->
-
-- Update the service to add `isactive: "yes"` to its selector:
-  ```bash
-  kubectl edit service rng
-  ```
-
-<!--
-```wait Please edit the object below```
-```keys /run: rng```
-```keys ^J```
-```keys noisactive: "yes"```
-```keys ^[``` ]
-```keys :wq```
-```keys ^J```
--->
-
-]
-
----
-
-## Checking what we've done
-
-.exercise[
-
-- Check the most recent log line of all `run=rng` pods to confirm that exactly one per node is now active:
-  ```bash
-  kubectl logs -l run=rng --tail 1
-  ```
-
-]
-
-The timestamps should give us a hint about how many pods are currently receiving traffic.
-
-.exercise[
-
-- Look at the pods that we have right now:
-  ```bash
-  kubectl get pods
-  ```
-
-]
-
----
-
-## Cleaning up
-
-- The pods of the deployment and the "old" daemon set are still running
-
-- We are going to identify them programmatically
-
-.exercise[
-
-- List the pods with `run=rng` but without `isactive=yes`:
-  ```bash
-  kubectl get pods -l run=rng,isactive!=yes
-  ```
-
-- Remove these pods:
-  ```bash
-  kubectl delete pods -l run=rng,isactive!=yes
-  ```
-
-]
-
----
-
-## Cleaning up stale pods
-
-```
-$ kubectl get pods
-NAME                   READY  STATUS       RESTARTS  AGE
-rng-54f57d4d49-7pt82   1/1    Terminating  0         51m
-rng-54f57d4d49-vgz9h   1/1    Running      0         22s
-rng-b85tm              1/1    Terminating  0         39m
-rng-hfbrr              1/1    Terminating  0         39m
-rng-vplmj              1/1    Running      0         7m
-rng-xbpvg              1/1    Running      0         7m
-[...]
-```
-
-- The extra pods (noted `Terminating` above) are going away
-
-- ... But a new one (`rng-54f57d4d49-vgz9h` above) was restarted immediately!
-
---
-
-- Remember, the *deployment* still exists, and makes sure that one pod is up and running
-
-- If we delete the pod associated to the deployment, it is recreated automatically
-
----
-
-## Deleting a deployment
-
-.exercise[
-
-- Remove the `rng` deployment:
-  ```bash
-  kubectl delete deployment rng
-  ```
-
-]
-
---
-
-- The pod that was created by the deployment is now being terminated:
-
-```
-$ kubectl get pods
-NAME                   READY  STATUS       RESTARTS  AGE
-rng-54f57d4d49-vgz9h   1/1    Terminating  0         4m
-rng-vplmj              1/1    Running      0         11m
-rng-xbpvg              1/1    Running      0         11m
-[...]
-```
-
-Ding, dong, the deployment is dead! And the daemon set lives on.
-
----
-
-## Avoiding extra pods
-
-- When we changed the definition of the daemon set, it immediately created new pods. We had to remove the old ones manually.
-
-- How could we have avoided this?
-
---
-
-- By adding the `isactive: "yes"` label to the pods before changing the daemon set!
-
-- This can be done programmatically with `kubectl patch`:
-
-  ```bash
-  PATCH='
-  metadata:
-    labels:
-      isactive: "yes"
-  '
-  kubectl get pods -l run=rng -l controller-revision-hash -o name |
-    xargs kubectl patch -p "$PATCH"
-  ```

---

## Labels and debugging

- When a pod is misbehaving, we can delete it: another one will be recreated
@@ -392,9 +392,9 @@ This is normal: we haven't provided any ingress rule yet.

- Run all three deployments:
  ```bash
-  kubectl run cheddar --image=errm/cheese:cheddar
-  kubectl run stilton --image=errm/cheese:stilton
-  kubectl run wensleydale --image=errm/cheese:wensleydale
+  kubectl create deployment cheddar --image=errm/cheese:cheddar
+  kubectl create deployment stilton --image=errm/cheese:stilton
+  kubectl create deployment wensleydale --image=errm/cheese:wensleydale
  ```

- Create a service for each of them:
@@ -57,31 +57,49 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables`

- Since `ping` doesn't have anything to connect to, we'll have to run something else

- We could use the `nginx` official image, but ...

  ... we wouldn't be able to tell the backends from each other!

+- We are going to use `jpetazzo/httpenv`, a tiny HTTP server written in Go
+
+- `jpetazzo/httpenv` listens on port 8888
+
+- It serves its environment variables in JSON format
+
+- The environment variables will include `HOSTNAME`, which will be the pod name
+
+  (and therefore, will be different on each backend)
+
+---
+
+## Creating a deployment for our HTTP server
+
+- We *could* do `kubectl run httpenv --image=jpetazzo/httpenv` ...
+
+- But since `kubectl run` is being deprecated, let's see how to use `kubectl create` instead
+
.exercise[

-- Start a bunch of HTTP servers:
-  ```bash
-  kubectl run httpenv --image=jpetazzo/httpenv --replicas=10
-  ```
-
-- Watch them being started:
+- In another window, watch the pods (to see when they will be created):
  ```bash
  kubectl get pods -w
  ```

-<!--
-```wait httpenv-```
-```keys ^C```
--->
+<!-- ```keys ^C``` -->

+- Create a deployment for this very lightweight HTTP server:
+  ```bash
+  kubectl create deployment httpenv --image=jpetazzo/httpenv
+  ```
+
+- Scale it to 10 replicas:
+  ```bash
+  kubectl scale deployment httpenv --replicas=10
+  ```
+
]

-The `jpetazzo/httpenv` image runs an HTTP server on port 8888.
-<br/>
-It serves its environment variables in JSON format.

The `-w` option "watches" events happening on the specified resources.
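Once the pods are up, we can sanity-check one backend directly. A minimal sketch (the pod IP below is a placeholder; look it up with `kubectl get pods -o wide`, and `jq` is assumed to be installed):

```bash
# From a node (or any pod on the cluster network):
curl -s http://10.1.0.17:8888/ | jq .HOSTNAME
# "httpenv-xxxxxxxxxx-yyyyy"
```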
---

## Exposing our deployment
@@ -92,12 +110,12 @@ The `-w` option "watches" events happening on the specified resources.

- Expose the HTTP port of our server:
  ```bash
-  kubectl expose deploy/httpenv --port 8888
+  kubectl expose deployment httpenv --port 8888
  ```

- Look up which IP address was allocated:
  ```bash
-  kubectl get svc
+  kubectl get service
  ```

]
@@ -237,7 +255,7 @@ class: extra-details

- These IP addresses should match the addresses of the corresponding pods:
  ```bash
-  kubectl get pods -l run=httpenv -o wide
+  kubectl get pods -l app=httpenv -o wide
  ```

---
@@ -173,6 +173,11 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
|
||||
kubectl scale deploy/pingpong --replicas 8
|
||||
```
|
||||
|
||||
- Note that this command does exactly the same thing:
|
||||
```bash
|
||||
kubectl scale deployment pingpong --replicas 8
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: what if we tried to scale `replicaset.apps/pingpong-xxxxxxxxxx`?
|
||||
@@ -290,6 +295,24 @@ Unfortunately, `--follow` cannot (yet) be used to stream the logs from multiple

---

class: extra-details

## `kubectl logs -l ... --tail N`

- If we run this with Kubernetes 1.12, the last command shows multiple lines

- This is a regression when `--tail` is used together with `-l`/`--selector`

- It always shows the last 10 lines of output for each container

(instead of the number of lines specified on the command line)

- The problem was fixed in Kubernetes 1.13

*See [#70554](https://github.com/kubernetes/kubernetes/issues/70554) for details.*
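A minimal way to observe the regression, assuming pods labeled `app=pingpong` (the label that `kubectl create deployment` sets):

```bash
# On 1.12, this shows the last 10 lines per container instead of 1
kubectl logs -l app=pingpong --tail 1
```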
---

## Aren't we flooding 1.1.1.1?

- If you're wondering this, good question!

@@ -12,13 +12,15 @@

.exercise[

<!-- ##VERSION## -->

- Download the `kubectl` binary from one of these links:

[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl)
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.13.2/bin/linux/amd64/kubectl)
|
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/darwin/amd64/kubectl)
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.13.2/bin/darwin/amd64/kubectl)
|
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/windows/amd64/kubectl.exe)
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.13.2/bin/windows/amd64/kubectl.exe)

- On Linux and macOS, make the binary executable with `chmod +x kubectl`

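To verify the download, this standard check works without a cluster:

```bash
# Should report the client version (v1.13.2)
./kubectl version --client
```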
@@ -62,10 +62,12 @@ Exactly what we need!

- The following commands will install Stern on a Linux Intel 64 bit machine:
```bash
sudo curl -L -o /usr/local/bin/stern \
https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
```

<!-- ##VERSION## -->

---

## Using Stern
@@ -130,11 +132,13 @@ Exactly what we need!

- We can use that property to view the logs of all the pods created with `kubectl run`

- Similarly, everything created with `kubectl create deployment` has a label `app`

.exercise[

- View the logs for all the things started with `kubectl run`:
- View the logs for all the things started with `kubectl create deployment`:
```bash
stern -l run
stern -l app
```

<!--

@@ -68,7 +68,7 @@
kubectl -n blue get svc
```

- We can also use *contexts*
- We can also change our current *context*

- A context is a *(user, cluster, namespace)* tuple

@@ -76,9 +76,9 @@

---

## Creating a context
## Viewing existing contexts

- We are going to create a context for the `blue` namespace
- On our training environments, at this point, there should be only one context

.exercise[

@@ -87,29 +87,79 @@
kubectl config get-contexts
```

- Create a new context:
]

- The current context (the only one!) is tagged with a `*`

- What are NAME, CLUSTER, AUTHINFO, and NAMESPACE?

---

## What's in a context

- NAME is an arbitrary string to identify the context

- CLUSTER is a reference to a cluster

(i.e. API endpoint URL, and optional certificate)

- AUTHINFO is a reference to the authentication information to use

(i.e. a TLS client certificate, token, or otherwise)

- NAMESPACE is the namespace

(empty string = `default`)
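To see the raw values behind those columns, one standard command helps (`--minify` trims the output to the current context only):

```bash
kubectl config view --minify
```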
---

## Switching contexts

- We want to use a different namespace

- Solution 1: update the current context

*This is appropriate if we need to change just one thing (e.g. namespace or authentication).*

- Solution 2: create a new context and switch to it

*This is appropriate if we need to change multiple things and switch back and forth.*

- Let's go with solution 1!

---

## Updating a context

- This is done through `kubectl config set-context`

- We can update a context by passing its name, or the current context with `--current`

.exercise[

- Update the current context to use the `blue` namespace:
```bash
kubectl config set-context blue --namespace=blue \
--cluster=kubernetes --user=kubernetes-admin
kubectl config set-context --current --namespace=blue
```

- Check the result:
```bash
kubectl config get-contexts
```

]

We have created a context; but these are just configuration values.

The namespace doesn't exist yet.

---

## Using a context
## Using our new namespace

- Let's switch to our new context and deploy the DockerCoins chart
- Let's check that we are in our new namespace, then deploy the DockerCoins chart

.exercise[

- Use the `blue` context:
- Verify that the new context is empty:
```bash
kubectl config use-context blue
kubectl get all
```

- Deploy DockerCoins:
@@ -181,30 +231,19 @@ Note: it might take a minute or two for the app to be up and running.

.exercise[

- View the names of the contexts:
```bash
kubectl config get-contexts
```

- Switch back to the original context:
```bash
kubectl config use-context kubernetes-admin@kubernetes
kubectl config set-context --current --namespace=
```

]

Note: we could have used `--namespace=default` for the same result.

---

## Switching namespaces more easily

- Defining a new context for each namespace can be cumbersome

- We can also alter the current context with this one-liner:

```bash
kubectl config set-context --current --namespace=foo
```

- We can also use a little helper tool called `kubens`:

```bash
@@ -214,6 +253,10 @@ Note: it might take a minute or two for the app to be up and running.
kubens -
```

- On our clusters, `kubens` is called `kns` instead

(so that it's even fewer keystrokes to switch namespaces)

---

## `kubens` and `kubectx`

@@ -117,13 +117,13 @@ This is our game plan:

- Let's use the `nginx` image:
```bash
kubectl run testweb --image=nginx
kubectl create deployment testweb --image=nginx
```

- Find out the IP address of the pod with one of these two commands:
```bash
kubectl get pods -o wide -l run=testweb
IP=$(kubectl get pods -l run=testweb -o json | jq -r .items[0].status.podIP)
kubectl get pods -o wide -l app=testweb
IP=$(kubectl get pods -l app=testweb -o json | jq -r .items[0].status.podIP)
```

- Check that we can connect to the server:

@@ -138,7 +138,7 @@ The `curl` command should show us the "Welcome to nginx!" page.

## Adding a very restrictive network policy

- The policy will select pods with the label `run=testweb`
- The policy will select pods with the label `app=testweb`

- It will specify an empty list of ingress rules (matching nothing)

@@ -172,7 +172,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
      run: testweb
      app: testweb
  ingress: []
```

@@ -207,7 +207,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
      run: testweb
      app: testweb
  ingress:
  - from:
    - podSelector:
@@ -247,9 +247,9 @@ The second command will fail and time out after 3 seconds.

- Some network plugins only have partial support for network policies

- For instance, Weave [doesn't support ipBlock (yet)](https://github.com/weaveworks/weave/issues/3168)
- For instance, Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018)

- Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018)
- But only recently added support for ipBlock [in version 2.5](https://github.com/weaveworks/weave/pull/3367) (released in Nov 2018)

- Unsupported features might be silently ignored

@@ -325,7 +325,7 @@ spec:

## Allowing traffic to `webui` pods

This policy selects all pods with label `run=webui`.
This policy selects all pods with label `app=webui`.

It allows traffic from any source.

@@ -339,7 +339,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
      run: webui
      app: webui
  ingress:
  - from: []
```
@@ -1,12 +1,42 @@
class: title
# Shipping images with a registry

Our app on Kube
- Initially, our app was running on a single node

- We could *build* and *run* in the same place

- Therefore, we did not need to *ship* anything

- Now that we want to run on a cluster, things are different

- The easiest way to ship container images is to use a registry

---

## What's on the menu?
## How Docker registries work (a reminder)

In this part, we will:
- What happens when we execute `docker run alpine` ?

- If the Engine needs to pull the `alpine` image, it expands it into `library/alpine`

- `library/alpine` is expanded into `index.docker.io/library/alpine`

- The Engine communicates with `index.docker.io` to retrieve `library/alpine:latest`

- To use something else than `index.docker.io`, we specify it in the image name

- Examples:
```bash
docker pull gcr.io/google-containers/alpine-with-bash:1.0

docker build -t registry.mycompany.io:5000/myimage:awesome .
docker push registry.mycompany.io:5000/myimage:awesome
```

---

## The plan

We are going to:

- **build** images for our app,

@@ -14,25 +44,42 @@ In this part, we will:

- **run** deployments using these images,

- expose these deployments so they can communicate with each other,
- expose (with a ClusterIP) the deployments that need to communicate together,

- expose the web UI so we can access it from outside.
- expose (with a NodePort) the web UI so we can access it from outside.

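For reference, a sketch of what those two exposure modes look like with `kubectl` (service names are from DockerCoins; the exact commands come later in this section):

```bash
# ClusterIP (the default): reachable only from inside the cluster
kubectl expose deployment redis --port 6379

# NodePort: additionally reachable on a port of every node
kubectl expose deployment webui --type=NodePort --port 80
```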
---

## The plan
## Building and shipping our app

- Build on our control node (`node1`)
- We will pick a registry

- Tag images so that they are named `$REGISTRY/servicename`
(let's pretend the address will be `REGISTRY:PORT`)

- Upload them to a registry
- We will build on our control node (`node1`)

- Create deployments using the images
(the images will be named `REGISTRY:PORT/servicename`)

- Expose (with a ClusterIP) the services that need to communicate
- We will push the images to the registry

- Expose (with a NodePort) the WebUI
- These images will be usable by the other nodes of the cluster

(i.e., we could do `docker run REGISTRY:PORT/servicename` from these nodes)

---

## A shortcut opportunity

- As it happens, the images that we need do already exist on the Docker Hub:

https://hub.docker.com/r/dockercoins/

- We could use them instead of using our own registry and images

*In the following slides, we are going to show how to run a registry
and use it to host container images. We will also show you how to
use the existing images from the Docker Hub, so that you can catch
up (or skip altogether the build/push part) if needed.*

---

@@ -40,11 +87,20 @@ In this part, we will:

- We could use the Docker Hub

- Or a service offered by our cloud provider (ACR, GCR, ECR...)
- There are alternatives like Quay

- Or we could just self-host that registry
- Each major cloud provider has an option as well

*We'll self-host the registry because it's the most generic solution for this workshop.*
(ACR on Azure, ECR on AWS, GCR on Google Cloud...)

- There are also commercial products to run our own registry

(Docker EE, Quay...)

- And open source options, too!

*We are going to self-host an open source registry because it's the most generic solution for this workshop. We will use Docker's reference
implementation for simplicity.*

---

@@ -66,7 +122,7 @@ In this part, we will:

---

# Deploying a self-hosted registry
## Deploying a self-hosted registry

- We will deploy a registry container, and expose it with a NodePort

@@ -74,7 +130,7 @@ In this part, we will:

- Create the registry service:
```bash
kubectl run registry --image=registry
kubectl create deployment registry --image=registry
```

- Expose it on a NodePort:
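The expose command itself falls outside this hunk; a plausible sketch (port 5000 is the registry's default port):

```bash
kubectl expose deployment registry --type=NodePort --port=5000
```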
@@ -246,7 +302,28 @@ class: extra-details

---

## Deploying all the things
## Catching up

- If you have problems deploying the registry ...

- Or building or pushing the images ...

- Don't worry: you can easily use pre-built images from the Docker Hub!

- The images are named `dockercoins/worker:v0.1`, `dockercoins/rng:v0.1`, etc.

- To use them, just set the `REGISTRY` environment variable to `dockercoins`:
```bash
export REGISTRY=dockercoins
```

- Make sure to set the `TAG` to `v0.1`

(our repositories on the Docker Hub do not provide a `latest` tag)

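Putting the two together (both variables are consumed by the deployment loop shown later in this section):

```bash
export REGISTRY=dockercoins
export TAG=v0.1
```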
---

# Running our application on Kubernetes

- We can now deploy our code (as well as a redis instance)

@@ -254,13 +331,13 @@ class: extra-details

- Deploy `redis`:
```bash
kubectl run redis --image=redis
kubectl create deployment redis --image=redis
```

- Deploy everything else:
```bash
for SERVICE in hasher rng webui worker; do
kubectl run $SERVICE --image=$REGISTRY/$SERVICE:$TAG
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
done
```

@@ -299,7 +376,7 @@ kubectl wait deploy/worker --for condition=available

---

# Exposing services internally
## Connecting containers together

- Three deployments need to be reachable by others: `hasher`, `redis`, `rng`

@@ -346,7 +423,7 @@ We should now see the `worker`, well, working happily.

---

# Exposing services for external access
## Exposing services for external access

- Now we would like to access the Web UI

@@ -22,14 +22,19 @@

.exercise[

- Let's start a replicated `nginx` deployment:
- Let's create a deployment running `nginx`:
```bash
kubectl run yanginx --image=nginx --replicas=3
kubectl create deployment yanginx --image=nginx
```

- Scale it to a few replicas:
```bash
kubectl scale deployment yanginx --replicas=3
```

- Once it's up, check the corresponding pods:
```bash
kubectl get pods -l run=yanginx -o yaml | head -n 25
kubectl get pods -l app=yanginx -o yaml | head -n 25
```

]
@@ -99,12 +104,12 @@ so the lines should not be indented (otherwise the indentation will insert space

- Delete the Deployment:
```bash
kubectl delete deployment -l run=yanginx --cascade=false
kubectl delete deployment -l app=yanginx --cascade=false
```

- Delete the Replica Set:
```bash
kubectl delete replicaset -l run=yanginx --cascade=false
kubectl delete replicaset -l app=yanginx --cascade=false
```

- Check that the pods are still here:

@@ -126,7 +131,7 @@ class: extra-details

- If we change the labels on a dependent, so that it's not selected anymore

(e.g. change the `run: yanginx` in the pods of the previous example)
(e.g. change the `app: yanginx` in the pods of the previous example)

- If a deployment tool that we're using does these things for us

@@ -174,4 +179,4 @@ class: extra-details

]

As always, the [documentation](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/) has useful extra information and pointers.

@@ -122,13 +122,13 @@

- Create a 10 GB file on each node:
```bash
for N in $(seq 1 5); do ssh node$N sudo truncate --size 10G /portworx.blk; done
for N in $(seq 1 4); do ssh node$N sudo truncate --size 10G /portworx.blk; done
```
(If SSH asks to confirm host keys, enter `yes` each time.)

- Associate the file to a loop device on each node:
```bash
for N in $(seq 1 5); do ssh node$N sudo losetup /dev/loop4 /portworx.blk; done
for N in $(seq 1 4); do ssh node$N sudo losetup /dev/loop4 /portworx.blk; done
```

]

@@ -475,12 +475,14 @@ class: extra-details

- The Kubernetes service endpoints exporter uses tag `pod` instead

- And this is why we can't have nice things
- See [this blog post](https://www.robustperception.io/exposing-the-software-version-to-prometheus) or [this other one](https://www.weave.works/blog/aggregating-pod-resource-cpu-memory-usage-arbitrary-labels-prometheus/) to see how to perform "joins"

- See [Prometheus issue #2204](https://github.com/prometheus/prometheus/issues/2204) for the rationale
- Alas, Prometheus cannot "join" time series with different labels

([this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) in particular if you want a workaround involving relabeling)
(see [Prometheus issue #2204](https://github.com/prometheus/prometheus/issues/2204) for the rationale)

- Then see [this blog post](https://www.robustperception.io/exposing-the-software-version-to-prometheus) or [this other one](https://www.weave.works/blog/aggregating-pod-resource-cpu-memory-usage-arbitrary-labels-prometheus/) to see how to perform "joins"
- There is a workaround involving relabeling, but it's "not cheap"

- There is a good chance that the situation will improve in the future
- see [this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) for an overview

- or [this blog post](https://5pi.de/2017/11/09/use-prometheus-vector-matching-to-get-kubernetes-utilization-across-any-pod-label/) for a complete description of the process

@@ -4,7 +4,9 @@

--

- We used `kubeadm` on freshly installed VM instances running Ubuntu 16.04 LTS
<!-- ##VERSION## -->

- We used `kubeadm` on freshly installed VM instances running Ubuntu 18.04 LTS

1. Install Docker

@@ -266,7 +266,9 @@ spec:

---

## Stateful sets in action
# Running a Consul cluster

- Here is a good use-case for Stateful sets!

- We are going to deploy a Consul cluster with 3 nodes

@@ -294,42 +296,54 @@ consul agent -data-dir=/consul/data -client=0.0.0.0 -server -ui \
-retry-join=`Y.Y.Y.Y`
```

- We need to replace X.X.X.X and Y.Y.Y.Y with the addresses of other nodes
- Replace X.X.X.X and Y.Y.Y.Y with the addresses of other nodes

- We can specify DNS names, but then they have to be FQDN

- It's OK for a pod to include itself in the list as well

- We can therefore use the same command-line on all nodes (easier!)
- The same command-line can be used on all nodes (convenient!)

---

## Discovering the addresses of other pods
## Cloud Auto-join

- When a service is created for a stateful set, individual DNS entries are created
- Since version 1.4.0, Consul can use the Kubernetes API to find its peers

- These entries are constructed like this:
- This is called [Cloud Auto-join]

`<name-of-stateful-set>-<n>.<name-of-service>.<namespace>.svc.cluster.local`
- Instead of passing an IP address, we need to pass a parameter like this:

- `<n>` is the number of the pod in the set (starting at zero)
```
consul agent -retry-join "provider=k8s label_selector=\"app=consul\""
```

- If we deploy Consul in the default namespace, the names could be:
- Consul needs to be able to talk to the Kubernetes API

- `consul-0.consul.default.svc.cluster.local`
- `consul-1.consul.default.svc.cluster.local`
- `consul-2.consul.default.svc.cluster.local`
- We can provide a `kubeconfig` file

- If Consul runs in a pod, it will use the *service account* of the pod

[Cloud Auto-join]: https://www.consul.io/docs/agent/cloud-auto-join.html#kubernetes-k8s-

---

## Setting up Cloud auto-join

- We need to create a service account for Consul

- We need to create a role that can `list` and `get` pods

- We need to bind that role to the service account

- And of course, we need to make sure that Consul pods use that service account (see the sketch after this list)
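A minimal imperative sketch of those four steps (the names are assumptions; the actual `k8s/consul.yaml` does the same thing declaratively, as the next slide explains):

```bash
kubectl create serviceaccount consul
kubectl create clusterrole consul --verb=get,list --resource=pods
kubectl create clusterrolebinding consul \
        --clusterrole=consul --serviceaccount=default:consul
# The stateful set then needs serviceAccountName: consul in its pod template
```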
---

## Putting it all together

- The file `k8s/consul.yaml` defines a service and a stateful set
- The file `k8s/consul.yaml` defines the required resources

(service account, cluster role, cluster role binding, service, stateful set)

- It has a few extra touches:

- the name of the namespace is injected through an environment variable

- a `podAntiAffinity` prevents two pods from running on the same node

- a `preStop` hook makes the pod leave the cluster when it is shut down gracefully
@@ -1,9 +1,10 @@
## Versions installed

- Kubernetes 1.12.0
- Docker Engine 18.06.1-ce
- Kubernetes 1.13.2
- Docker Engine 18.09.1
- Docker Compose 1.21.1

<!-- ##VERSION## -->

.exercise[

@@ -22,7 +23,7 @@ class: extra-details

## Kubernetes and Docker compatibility

- Kubernetes 1.12.x only validates Docker Engine versions [1.11.2 to 1.13.1 and 17.03.x](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md#external-dependencies)
- Kubernetes 1.13.x only validates Docker Engine versions [up to 18.06](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#external-dependencies)

--

@@ -34,7 +35,9 @@ class: extra-details

class: extra-details

- "Validates" = continuous integration builds
- No!

- "Validates" = continuous integration builds with very extensive (and expensive) testing

- The Docker API is versioned, and offers strong backward-compatibility

@@ -77,6 +77,18 @@ And *then* it is time to look at orchestration!

---

## Relevant sections

- [Namespaces](kube-selfpaced.yml.html#toc-namespaces)

- [Network Policies](kube-selfpaced.yml.html#toc-network-policies)

- [Role-Based Access Control](kube-selfpaced.yml.html#toc-authentication-and-authorization)

(covers permissions model, user and service accounts management ...)

---

## Stateful services (databases etc.)

- As a first step, it is wiser to keep stateful services *outside* of the cluster

@@ -113,6 +125,13 @@ And *then* it is time to look at orchestration!

- what do we gain by deploying this stateful service on Kubernetes?

- Relevant sections:
[Volumes](kube-selfpaced.yml.html#toc-volumes)
|
[Stateful Sets](kube-selfpaced.yml.html#toc-stateful-sets)
|
[Persistent Volumes](kube-selfpaced.yml.html#toc-highly-available-persistent-volumes)

---

## HTTP traffic handling

@@ -130,7 +149,7 @@ And *then* it is time to look at orchestration!
- URI mapping
- and much more!

- Check out e.g. [Træfik](https://docs.traefik.io/user-guide/kubernetes/)
- [This section](kube-selfpaced.yml.html#toc-exposing-http-services-with-ingress-resources) shows how to expose multiple HTTP apps using [Træfik](https://docs.traefik.io/user-guide/kubernetes/)

---

@@ -146,6 +165,8 @@ And *then* it is time to look at orchestration!

(e.g. with an agent bind-mounting the log directory)

- [This section](kube-selfpaced.yml.html#toc-centralized-logging) shows how to do that with [Fluentd](https://docs.fluentd.org/v0.12/articles/kubernetes-fluentd) and the EFK stack

---

## Metrics
@@ -180,6 +201,8 @@ And *then* it is time to look at orchestration!

(It's the container equivalent of the password on a post-it note on your screen)

- [This section](kube-selfpaced.yml.html#toc-managing-configuration) shows how to manage app config with config maps (among others)

---

## Managing stack deployments

@@ -33,30 +33,30 @@ chapters:
- k8s/kubectlrun.md
- k8s/kubectlexpose.md
- - k8s/ourapponkube.md
- k8s/kubectlproxy.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
# - k8s/kubectlproxy.md
# - k8s/localkubeconfig.md
# - k8s/accessinternal.md
- k8s/dashboard.md
- k8s/kubectlscale.md
- - k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/daemonset.md
- - k8s/rollout.md
# - k8s/healthchecks.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- - k8s/helm.md
- k8s/namespaces.md
- k8s/netpol.md
- k8s/authn-authz.md
- - k8s/ingress.md
- k8s/gitworkflows.md
#- - k8s/helm.md
# - k8s/namespaces.md
# - k8s/netpol.md
# - k8s/authn-authz.md
#- - k8s/ingress.md
# - k8s/gitworkflows.md
- k8s/prometheus.md
- - k8s/volumes.md
- k8s/build-with-docker.md
- k8s/build-with-kaniko.md
- k8s/configuration.md
- - k8s/owners-and-dependents.md
- k8s/statefulsets.md
- k8s/portworx.md
#- - k8s/volumes.md
# - k8s/build-with-docker.md
# - k8s/build-with-kaniko.md
# - k8s/configuration.md
#- - k8s/owners-and-dependents.md
# - k8s/statefulsets.md
# - k8s/portworx.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

@@ -1,6 +1,7 @@
title: |
Kubernetes 101

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"

@@ -5,6 +5,7 @@ title: |
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

slides/kube-twodays.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
title: |
Deploying and Scaling Applications
with Kubernetes

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/enix/formation-kubernetes-20190117)"

gitrepo: github.com/jpetazzo/container.training

slides: http://kube-2019-01.container.training/

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composescale.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- - k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- k8s/kubectlexpose.md
- - k8s/ourapponkube.md
- k8s/kubectlproxy.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/dashboard.md
- k8s/kubectlscale.md
- - k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- - k8s/helm.md
- k8s/namespaces.md
- k8s/netpol.md
- k8s/authn-authz.md
- - k8s/ingress.md
#- k8s/gitworkflows.md
- k8s/prometheus.md
- - k8s/volumes.md
- k8s/build-with-docker.md
- k8s/build-with-kaniko.md
- k8s/configuration.md
- - k8s/owners-and-dependents.md
- k8s/statefulsets.md
- k8s/portworx.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

@@ -1,26 +1,14 @@
## Intros

- This slide should be customized by the tutorial instructor(s).

- Hello! We are:

- .emoji[👩🏻‍🏫] Ann O'Nymous ([@...](https://twitter.com/...), Megacorp Inc)

- .emoji[👨🏾‍🎓] Stu Dent ([@...](https://twitter.com/...), University of Wakanda)

<!-- .dummy[

- .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)

- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)

- .emoji[⛵] Jérémy ([@jeremygarrouste](twitter.com/jeremygarrouste), Inpiwee)
- .emoji[🎧] Romain ([@rdegez](https://twitter.com/rdegez), Enix SAS)

] -->
- The training will run from 9am to 5pm

- The workshop will run from ...

- There will be a lunch break at ...
- There will be a lunch break around noon

(And coffee breaks!)

slides/override.css (new file, 17 lines)
@@ -0,0 +1,17 @@
.remark-slide-content:not(.pic) {
  background-repeat: no-repeat;
  background-position: 99% 1%;
  background-size: 8%;
  background-image: url(https://enix.io/static/img/logos/logo-domain-cropped.png);
}

div.extra-details:not(.pic) {
  background-image: url("images/extra-details.png"), url(https://enix.io/static/img/logos/logo-domain-cropped.png);
  background-position: 0.5% 1%, 99% 1%;
  background-size: 4%, 8%;
}

.remark-slide-content:not(.pic) div.remark-slide-number {
  top: 16px;
  right: 112px
}
@@ -54,49 +54,84 @@ and displays aggregated logs.

---

## More detail on our sample application
## What's this application?

- Visit the GitHub repository with all the materials of this workshop:
<br/>https://@@GITREPO@@
--

- The application is in the [dockercoins](
https://@@GITREPO@@/tree/master/dockercoins)
subdirectory
- It is a DockerCoin miner! .emoji[💰🐳📦🚢]

- Let's look at the general layout of the source code:
--

there is a Compose file [docker-compose.yml](
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml) ...
- No, you can't buy coffee with DockerCoins

... and 4 other services, each in its own directory:
--

- `rng` = web service generating random bytes
- `hasher` = web service computing hash of POSTed data
- `worker` = background process using `rng` and `hasher`
- `webui` = web interface to watch progress
- How DockerCoins works:

- generate a few random bytes

- hash these bytes

- increment a counter (to keep track of speed)

- repeat forever!

--

- DockerCoins is *not* a cryptocurrency

(the only common points are "randomness", "hashing", and "coins" in the name)

---

class: extra-details
## DockerCoins in the microservices era

## Compose file format version
- DockerCoins is made of 5 services:

*Particularly relevant if you have used Compose before...*
- `rng` = web service generating random bytes

- Compose 1.6 introduced support for a new Compose file format (aka "v2")
- `hasher` = web service computing hash of POSTed data

- Services are no longer at the top level, but under a `services` section
- `worker` = background process calling `rng` and `hasher`

- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)
- `webui` = web interface to watch progress

- Containers are placed on a dedicated network, making links unnecessary
- `redis` = data store (holds a counter updated by `worker`)

- There are other minor differences, but upgrade is easy and straightforward
- These 5 services are visible in the application's Compose file,
[docker-compose.yml](
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml)

---

## How DockerCoins works

- `worker` invokes web service `rng` to generate random bytes

- `worker` invokes web service `hasher` to hash these bytes

- `worker` does this in an infinite loop

- every second, `worker` updates `redis` to indicate how many loops were done

- `webui` queries `redis`, and computes and exposes "hashing speed" in our browser

*(See diagram on next slide!)*

---

class: pic

![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg)

---

## Service discovery in container-land

How does each service find out the address of the other ones?

--

- We do not hard-code IP addresses in the code

- We do not hard-code FQDN in the code, either
@@ -150,35 +185,46 @@ class: extra-details

---

## What's this application?
## Show me the code!

--
- You can check the GitHub repository with all the materials of this workshop:
<br/>https://@@GITREPO@@

- It is a DockerCoin miner! .emoji[💰🐳📦🚢]
- The application is in the [dockercoins](
https://@@GITREPO@@/tree/master/dockercoins)
subdirectory

--
- The Compose file ([docker-compose.yml](
https://@@GITREPO@@/blob/master/dockercoins/docker-compose.yml))
lists all 5 services

- No, you can't buy coffee with DockerCoins
- `redis` is using an official image from the Docker Hub

--
- `hasher`, `rng`, `worker`, `webui` are each built from a Dockerfile

- How DockerCoins works:
- Each service's Dockerfile and source code is in its own directory

- `worker` asks `rng` to generate a few random bytes

- `worker` feeds these bytes into `hasher`

- and repeats forever!

- every second, `worker` updates `redis` to indicate how many loops were done

- `webui` queries `redis`, and computes and exposes "hashing speed" in your browser
(`hasher` is in the [hasher](https://@@GITREPO@@/blob/master/dockercoins/hasher/) directory,
`rng` is in the [rng](https://@@GITREPO@@/blob/master/dockercoins/rng/)
directory, etc.)

---

class: pic
class: extra-details

![Diagram showing the 5 containers of the applications](images/dockercoins-diagram.svg)
## Compose file format version

*This is relevant only if you have used Compose before 2016...*

- Compose 1.6 introduced support for a new Compose file format (aka "v2")

- Services are no longer at the top level, but under a `services` section

- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)

- Containers are placed on a dedicated network, making links unnecessary

- There are other minor differences, but upgrade is easy and straightforward

---

@@ -9,25 +9,3 @@ class: title, in-person

That's all, folks! <br/> Questions?

![end](images/end.jpg)

---

## Final words

- You can find more content on http://container.training/

(More slides, videos, dates of upcoming workshops and tutorials...)

- If you liked this format, I can also teach the following courses:

- [two-day Kubernetes bootstrap](https://tinyshellscript.com/kubernetes-bootstrap.html)

- [four-day Kubernetes administrator training](https://tinyshellscript.com/kubernetes-ops-week.html)

- custom courses of any length, covering Docker, Swarm, Kubernetes

- If you want me to train your team, contact me:

jerome.petazzoni@gmail.com

.footnote[*Thank you!*]
@@ -11,11 +11,10 @@ class: title, in-person

@@TITLE@@<br/></br>

.footnote[
**Be kind to the WiFi!**<br/>
<!-- *Use the 5G network.* -->
*Don't use your hotspot.*<br/>
*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*<br/>
*Thank you!*
<!--
**WiFi: ENIX**</br>
**Password: AIRBUS2019**<br/>
-->

**Slides: @@SLIDES@@**
]

@@ -40,10 +40,10 @@ chapters:
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/compose2swarm.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md
- swarm/rollingupdates.md
- swarm/healthchecks.md
- - swarm/operatingswarm.md
- swarm/netshoot.md

@@ -40,7 +40,7 @@ chapters:
#- swarm/testingregistry.md
#- swarm/btp-manual.md
#- swarm/swarmready.md
- swarm/compose2swarm.md
- swarm/stacks.md
- swarm/cicd.md
- swarm/updatingservices.md
#- swarm/rollingupdates.md

@@ -41,7 +41,7 @@ chapters:
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/compose2swarm.md
- swarm/stacks.md
- swarm/cicd.md
- |
name: part-2

@@ -41,7 +41,7 @@ chapters:
- swarm/testingregistry.md
- swarm/btp-manual.md
- swarm/swarmready.md
- swarm/compose2swarm.md
- swarm/stacks.md
- |
name: part-2

@@ -10,9 +10,10 @@

- And run this little for loop:
```bash
cd ~/container.training/dockercoins
REGISTRY=127.0.0.1:5000 TAG=v1
export REGISTRY=127.0.0.1:5000
export TAG=v0.1
for SERVICE in hasher rng webui worker; do
docker tag dockercoins_$SERVICE $REGISTRY/$SERVICE:$TAG
docker build -t $REGISTRY/$SERVICE:$TAG ./$SERVICE
docker push $REGISTRY/$SERVICE
done
```
@@ -119,12 +120,12 @@ It alters the code path for `docker run`, so it is allowed only under strict cir

- Start the other services:
```bash
REGISTRY=127.0.0.1:5000
TAG=v1
for SERVICE in hasher rng webui worker; do
docker service create --network dockercoins --detach=true \
--name $SERVICE $REGISTRY/$SERVICE:$TAG
done
export REGISTRY=127.0.0.1:5000
export TAG=v0.1
for SERVICE in hasher rng webui worker; do
docker service create --network dockercoins --detach=true \
--name $SERVICE $REGISTRY/$SERVICE:$TAG
done
```

]
@@ -1,6 +1,6 @@
name: healthchecks

# Health checks
# Health checks and auto-rollbacks

(New in Docker Engine 1.12)

@@ -61,7 +61,7 @@ name: healthchecks

---

## Automated rollbacks
## Enabling health checks and auto-rollbacks

Here is a comprehensive example using the CLI:
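That example sits outside this hunk; a sketch of the relevant flags (all standard `docker service update` options, applied here to the DockerCoins hasher):

```bash
docker service update dockercoins_hasher \
       --health-cmd "curl -f http://localhost/ || exit 1" \
       --health-interval 2s \
       --update-failure-action rollback \
       --update-monitor 5s
```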
@@ -96,6 +96,8 @@ We will use the following Compose file (`stacks/dockercoins+healthcheck.yml`):
  hasher:
    build: dockercoins/hasher
    image: ${REGISTRY-127.0.0.1:5000}/hasher:${TAG-latest}
    healthcheck:
      test: curl -f http://localhost/ || exit 1
    deploy:
      replicas: 7
      update_config:
@@ -109,7 +111,9 @@ We will use the following Compose file (`stacks/dockercoins+healthcheck.yml`):

---

## Enabling auto-rollback
## Enabling auto-rollback in dockercoins

We need to update our services with a healthcheck.

.exercise[

@@ -118,44 +122,22 @@ We will use the following Compose file (`stacks/dockercoins+healthcheck.yml`):
cd ~/container.training/stacks
```

- Deploy the updated stack:
- Deploy the updated stack with healthchecks built-in:
```bash
docker stack deploy --compose-file dockercoins+healthcheck.yml dockercoins
```

]

This will also scale the `hasher` service to 7 instances.

---

## Visualizing a rolling update

First, let's make an "innocent" change and deploy it.

.exercise[

- Update the `sleep` delay in the code:
```bash
sed -i "s/sleep 0.1/sleep 0.2/" dockercoins/hasher/hasher.rb
```

- Build, ship, and run the new image:
```bash
export TAG=v0.5
docker-compose -f dockercoins+healthcheck.yml build
docker-compose -f dockercoins+healthcheck.yml push
docker service update dockercoins_hasher \
--image=127.0.0.1:5000/hasher:$TAG
```

]

---

## Visualizing an automated rollback

And now, a breaking change that will cause the health check to fail:
- Here's a good example of why healthchecks are necessary

- This breaking change will prevent the app from listening on the correct port

- The container still runs fine, it just won't accept connections on port 80

.exercise[

@@ -166,11 +148,10 @@ And now, a breaking change that will cause the health check to fail:

- Build, ship, and run the new image:
```bash
export TAG=v0.6
export TAG=v0.3
docker-compose -f dockercoins+healthcheck.yml build
docker-compose -f dockercoins+healthcheck.yml push
docker service update dockercoins_hasher \
--image=127.0.0.1:5000/hasher:$TAG
docker service update --image=127.0.0.1:5000/hasher:$TAG dockercoins_hasher
```

]
@@ -160,7 +160,7 @@ class: self-paced

- Keep managers in one region (multi-zone/datacenter/rack)

- Groups of 3 or 5 nodes: all are managers. Beyond 5, seperate out managers and workers
- Groups of 3 or 5 nodes: all are managers. Beyond 5, separate out managers and workers

- Groups of 10-100 nodes: pick 5 "stable" nodes to be managers

@@ -1,72 +1,44 @@
# Rolling updates

- Let's change a scaled service: `worker`
- Let's force an update on hasher to watch it update

.exercise[

- Edit `worker/worker.py`

- Locate the `sleep` instruction and change the delay

- Build, ship, and run our changes:
- First, let's scale up hasher to 7 replicas:
```bash
export TAG=v0.4
docker-compose -f dockercoins.yml build
docker-compose -f dockercoins.yml push
docker stack deploy -c dockercoins.yml dockercoins
docker service scale dockercoins_hasher=7
```

- Force a rolling update (replace containers) to a different image:
```bash
docker service update --image 127.0.0.1:5000/hasher:v0.1 dockercoins_hasher
```

]

---
- You can run `docker events` in a separate `node1` shell to see Swarm actions

## Viewing our update as it rolls out

.exercise[

- Check the status of the `dockercoins_worker` service:
```bash
watch docker service ps dockercoins_worker
```

<!-- ```wait dockercoins_worker.1``` -->
<!-- ```keys ^C``` -->

- Hide the tasks that are shutdown:
```bash
watch -n1 "docker service ps dockercoins_worker | grep -v Shutdown.*Shutdown"
```

<!-- ```wait dockercoins_worker.1``` -->
<!-- ```keys ^C``` -->

]

If you had stopped the workers earlier, this will automatically restart them.

By default, SwarmKit does a rolling upgrade, one instance at a time.

We should therefore see the workers being updated one by one.
- You can use `--force` to replace containers without a config change
---

## Changing the upgrade policy

- We can set upgrade parallelism (how many instances to update at the same time)

- And upgrade delay (how long to wait between two batches of instances)
- We can change many options on how updates happen

.exercise[

- Change the parallelism to 2 and the delay to 5 seconds:
- Change the parallelism to 2, and the max failed container updates to 25%:
```bash
docker service update dockercoins_worker \
--update-parallelism 2 --update-delay 5s
docker service update --update-parallelism 2 \
--update-max-failure-ratio .25 dockercoins_hasher
```

]

The current upgrade will continue at a faster pace.
- No containers were replaced; this is called a "no op" change

- Service metadata-only changes don't require orchestrator operations

---

@@ -90,15 +62,17 @@ The current upgrade will continue at a faster pace.

- At any time (e.g. before the upgrade is complete), we can rollback:

- by editing the Compose file and redeploying;
- by editing the Compose file and redeploying

- or with the special `--rollback` flag
- by using the special `--rollback` flag with `service update`

- by using `docker service rollback`

.exercise[

- Try to rollback the service:
- Try to rollback the webui service:
```bash
docker service update dockercoins_worker --rollback
docker service rollback dockercoins_webui
```

]
@@ -111,6 +85,8 @@ What happens with the web UI graph?

- Rollback reverts to the previous service definition

- see `PreviousSpec` in `docker service inspect <servicename>` (and the sketch after this list)

- If we visualize successive updates as a stack:

- it doesn't "pop" the latest update
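A quick way to peek at that previous definition, as a sketch (`--format` is standard `docker service inspect`; `jq` just pretty-prints):

```bash
docker service inspect dockercoins_webui \
       --format '{{json .PreviousSpec}}' | jq .
```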
@@ -22,7 +22,7 @@ class: btp-manual

---

# Integration with Compose
# Swarm Stacks

- Compose is great for local development

@@ -14,11 +14,12 @@

---

## Updating a single service the hard way
## Updating a single service with `service update`

- To update a single service, we could do the following:
```bash
REGISTRY=localhost:5000 TAG=v0.3
export REGISTRY=127.0.0.1:5000
export TAG=v0.2
IMAGE=$REGISTRY/dockercoins_webui:$TAG
docker build -t $IMAGE webui/
docker push $IMAGE
@@ -31,11 +32,11 @@

---

## Updating services the easy way
## Updating services with `stack deploy`

- With the Compose integration, all we have to do is:
```bash
export TAG=v0.3
export TAG=v0.2
docker-compose -f composefile.yml build
docker-compose -f composefile.yml push
docker stack deploy -c composefile.yml nameofstack
@@ -47,6 +48,8 @@

- We don't need to learn new commands!

- It will diff each service and only update ones that changed

---

## Changing the code
@@ -55,26 +58,11 @@

.exercise[

- Edit the file `webui/files/index.html`:
- Update the size of text on our webui:
```bash
vi dockercoins/webui/files/index.html
sed -i "s/15px/50px/" dockercoins/webui/files/index.html
```

<!-- ```wait <title>``` -->

- Locate the `font-size` CSS attribute and increase it (at least double it)

<!--
```keys /font-size```
```keys ^J```
```keys lllllllllllllcw45px```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->

- Save and exit

]

---
@@ -92,7 +80,7 @@

- Build, ship, and run:
```bash
export TAG=v0.3
export TAG=v0.2
docker-compose -f dockercoins.yml build
docker-compose -f dockercoins.yml push
docker stack deploy -c dockercoins.yml dockercoins
@@ -100,6 +88,8 @@

]

- FYI: because we're tagging all images v0.2 in this demo, the deploy will update all the apps

---

## Viewing our changes

@@ -4,6 +4,7 @@
<title>@@TITLE@@</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="stylesheet" href="workshop.css">
<link rel="stylesheet" href="override.css">
</head>
<body>
<!--

@@ -10,6 +10,8 @@ services:
  hasher:
    build: dockercoins/hasher
    image: ${REGISTRY-127.0.0.1:5000}/hasher:${TAG-latest}
    healthcheck:
      test: curl -f http://localhost/ || exit 1
    deploy:
      replicas: 7
      update_config:

@@ -1,35 +0,0 @@
version: "3"

services:
  rng:
    build: dockercoins/rng
    image: ${REGISTRY-127.0.0.1:5000}/rng:${TAG-latest}
    deploy:
      mode: global

  hasher:
    build: dockercoins/hasher
    image: ${REGISTRY-127.0.0.1:5000}/hasher:${TAG-latest}
    deploy:
      replicas: 7
      update_config:
        delay: 5s
        failure_action: rollback
        max_failure_ratio: .5
        monitor: 5s
        parallelism: 1

  webui:
    build: dockercoins/webui
    image: ${REGISTRY-127.0.0.1:5000}/webui:${TAG-latest}
    ports:
    - "8000:80"

  redis:
    image: redis

  worker:
    build: dockercoins/worker
    image: ${REGISTRY-127.0.0.1:5000}/worker:${TAG-latest}
    deploy:
      replicas: 10
@@ -5,7 +5,7 @@ services:
    image: elasticsearch:2

  logstash:
    image: logstash
    image: logstash:2
    command: |
      -e '
      input {
