Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-04-28 13:06:34 +00:00)

Compare commits: vmware-201 ... clt-2019-1 (7 commits)
| Author | SHA1 | Date |
|---|---|---|
| | f1395e87f9 | |
| | e0a89b1598 | |
| | dd3a9e59ac | |
| | c24435e9aa | |
| | 21477170f3 | |
| | cc91063eb8 | |
| | fe6e0e774f | |
@@ -1,160 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: hasher
-  name: hasher
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: hasher
-  template:
-    metadata:
-      labels:
-        app: hasher
-    spec:
-      containers:
-      - image: dockercoins/hasher:v0.1
-        name: hasher
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: hasher
-  name: hasher
-spec:
-  ports:
-  - port: 80
-    protocol: TCP
-    targetPort: 80
-  selector:
-    app: hasher
-  type: ClusterIP
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: redis
-  name: redis
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: redis
-  template:
-    metadata:
-      labels:
-        app: redis
-    spec:
-      containers:
-      - image: redis
-        name: redis
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: redis
-  name: redis
-spec:
-  ports:
-  - port: 6379
-    protocol: TCP
-    targetPort: 6379
-  selector:
-    app: redis
-  type: ClusterIP
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: rng
-  name: rng
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: rng
-  template:
-    metadata:
-      labels:
-        app: rng
-    spec:
-      containers:
-      - image: dockercoins/rng:v0.1
-        name: rng
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: rng
-  name: rng
-spec:
-  ports:
-  - port: 80
-    protocol: TCP
-    targetPort: 80
-  selector:
-    app: rng
-  type: ClusterIP
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: webui
-  name: webui
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: webui
-  template:
-    metadata:
-      labels:
-        app: webui
-    spec:
-      containers:
-      - image: dockercoins/webui:v0.1
-        name: webui
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: webui
-  name: webui
-spec:
-  ports:
-  - port: 80
-    protocol: TCP
-    targetPort: 80
-  selector:
-    app: webui
-  type: NodePort
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: worker
-  name: worker
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: worker
-  template:
-    metadata:
-      labels:
-        app: worker
-    spec:
-      containers:
-      - image: dockercoins/worker:v0.1
-        name: worker
@@ -9,7 +9,7 @@ spec:
       name: haproxy
   containers:
   - name: haproxy
-    image: haproxy:1
+    image: haproxy
     volumeMounts:
     - name: config
       mountPath: /usr/local/etc/haproxy/
@@ -1,13 +1,13 @@
 apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
-  name: whatever
+  name: kibana
 spec:
   rules:
-  - host: whatever.A.B.C.D.nip.io
+  - host: kibana.185.145.251.54.nip.io
     http:
       paths:
       - path: /
         backend:
-          serviceName: whatever
-          servicePort: 1234
+          serviceName: kibana
+          servicePort: 5601
@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: nginx-without-volume
-spec:
-  containers:
-  - name: nginx
-    image: nginx
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: nginx-with-volume
-spec:
-  volumes:
-  - name: www
-  containers:
-  - name: nginx
-    image: nginx
-    volumeMounts:
-    - name: www
-      mountPath: /usr/share/nginx/html/
@@ -1,20 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: nginx-with-init
-spec:
-  volumes:
-  - name: www
-  containers:
-  - name: nginx
-    image: nginx
-    volumeMounts:
-    - name: www
-      mountPath: /usr/share/nginx/html/
-  initContainers:
-  - name: git
-    image: alpine
-    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
-    volumeMounts:
-    - name: www
-      mountPath: /www/
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: nginx-with-git
+  name: nginx-with-volume
 spec:
   volumes:
   - name: www
@@ -1,4 +1,5 @@
-# SOURCE: https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false
+# SOURCE: https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false
 ---
 kind: Service
 apiVersion: v1
@@ -750,16 +751,3 @@ spec:
       volumes:
       - name: config
         emptyDir: {}
----
-# That one is an extra.
-# Create a default Storage Class to simplify Portworx setup.
-kind: StorageClass
-apiVersion: storage.k8s.io/v1beta1
-metadata:
-  name: portworx-replicated
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "true"
-provisioner: kubernetes.io/portworx-volume
-parameters:
-  repl: "2"
-  priority_io: "high"
@@ -12,14 +12,7 @@ spec:
       labels:
         app: postgres
     spec:
-      #schedulerName: stork
-      initContainers:
-      - name: rmdir
-        image: alpine
-        volumeMounts:
-        - mountPath: /vol
-          name: postgres
-        command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
+      schedulerName: stork
       containers:
       - name: postgres
         image: postgres:11
@@ -7,8 +7,8 @@ workshop.

 ## 1. Prerequisites
-Virtualbox, Vagrant and Ansible

+Virtualbox, Vagrant and Ansible

 - Virtualbox: https://www.virtualbox.org/wiki/Downloads

@@ -25,7 +25,7 @@ Virtualbox, Vagrant and Ansible

     $ git clone --recursive https://github.com/ansible/ansible.git
     $ cd ansible
-    $ git checkout stable-{{ getStableVersionFromAnsibleProject }}
+    $ git checkout stable-2.0.0.1
     $ git submodule update

 - source the setup script to make Ansible available on this terminal session:

@@ -38,7 +38,6 @@ Virtualbox, Vagrant and Ansible

 ## 2. Preparing the environment
-Change into the directory that has your Vagrantfile

 Run the following commands:

@@ -67,14 +66,6 @@ will reflect inside the instance.

 - Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed

-- If you get an error like "no Vagrant file found", or you have a file but get "cannot open base box" when running `vagrant up`,
-  chances are good you are not in the correct directory.
-  Make sure you are in the subdirectory named "prepare-local". It has all the config files required by Ansible, Vagrant, and VirtualBox.
-
-- If you are using Python 3.7, run the ansible-playbook provisioning, and see an error like "SyntaxError: invalid syntax" that mentions
-  the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
-  https://github.com/ansible/ansible/issues/42105
-
 - If you get strange Ansible errors about dependencies, try to check your pip
   version with `pip --version`. The current version is 8.1.1. If your pip is
   older than this, upgrade it with `sudo pip install --upgrade pip`, restart
@@ -10,21 +10,15 @@ These tools can help you to create VMs on:

 - [Docker](https://docs.docker.com/engine/installation/)
 - [Docker Compose](https://docs.docker.com/compose/install/)
-- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
+- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this

 Depending on the infrastructure that you want to use, you also need to install
 the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).

 And if you want to generate printable cards:

-- [pyyaml](https://pypi.python.org/pypi/PyYAML)
-- [jinja2](https://pypi.python.org/pypi/Jinja2)
-
-You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).
-
+These require Python 3. If you are on a Mac, see below for specific instructions on setting up
+Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
+may have changed your default Python to Python 2.
+
+- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
+- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)

 ## General Workflow
@@ -262,32 +256,3 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m

 - Don't write to bash history in system() in postprep
 - compose, etc version inconsistent (int vs str)
-
-## Making sure Python3 is the default (Mac only)
-
-Check the `/usr/local/bin/python` symlink. It should be pointing to
-`/usr/local/Cellar/python/3`-something. If it isn't, follow these
-instructions.
-
-1) Verify that Python 3 is installed.
-
-```
-ls -la /usr/local/Cellar/Python
-```
-
-You should see one or more versions of Python 3. If you don't,
-install it with `brew install python`.
-
-2) Verify that `python` points to Python3.
-
-```
-ls -la /usr/local/bin/python
-```
-
-If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
-
-```
-rm /usr/local/bin/python
-ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
-# where xxxx is the most recent Python 3 version you saw above
-```
@@ -127,11 +127,11 @@ _cmd_kubebins() {
     set -e
     cd /usr/local/bin
     if ! [ -x etcd ]; then
-      curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.15/etcd-v3.3.15-linux-amd64.tar.gz \
+      curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
       | sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
     fi
     if ! [ -x hyperkube ]; then
-      curl -L https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz \
+      curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
       | sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
     fi
     if ! [ -x kubelet ]; then
@@ -143,7 +143,7 @@ _cmd_kubebins() {
     sudo mkdir -p /opt/cni/bin
     cd /opt/cni/bin
     if ! [ -x bridge ]; then
-      curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
+      curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
       | sudo tar -zx
     fi
     "
@@ -362,16 +362,6 @@ _cmd_opensg() {
     infra_opensg
 }

-_cmd portworx "Prepare the nodes for Portworx deployment"
-_cmd_portworx() {
-    TAG=$1
-    need_tag
-
-    pssh "
-    sudo truncate --size 10G /portworx.blk &&
-    sudo losetup /dev/loop4 /portworx.blk"
-}
-
 _cmd disableaddrchecks "Disable source/destination IP address checks"
 _cmd_disableaddrchecks() {
     TAG=$1
@@ -106,7 +106,6 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
 system("sudo service ssh restart")
 system("sudo apt-get -q update")
 system("sudo apt-get -qy install git jq")
-system("sudo apt-get -qy install emacs-nox joe")

 #######################
 ### DOCKER INSTALLS ###
@@ -1,7 +1,9 @@
 # Uncomment and/or edit one of the following lines if necessary.
 #/ /kube-halfday.yml.html 200
-/ /kube-fullday.yml.html 200!
+#/ /kube-fullday.yml.html 200
 #/ /kube-twodays.yml.html 200

 # And this allows to do "git clone https://container.training".
 /info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
+
+/ /clt.html 200!
slides/clt.html (new file, 3 lines)
@@ -0,0 +1,3 @@
+<a href="containers.yml.html">Docker and Containers (Tuesday)</a>
+
+<a href="kubernetes.yml.html">Kubernetes and Orchestration (Wednesday-Thursday)</a>

slides/containers.yml (new file, 77 lines)
@@ -0,0 +1,77 @@
+title: |
+  Docker
+  &
+  Containers
+
+chat: "(None)"
+
+gitrepo: github.com/jpetazzo/container.training
+
+slides: http://clt-2019-10.container.training/
+
+exclude:
+- self-paced
+
+chapters:
+- shared/title.md
+- logistics.md
+- containers/intro.md
+- shared/about-slides.md
+- shared/toc.md
+-
+  - containers/Docker_Overview.md
+  #- containers/Docker_History.md
+  - containers/Training_Environment.md
+  #- containers/Installing_Docker.md
+  - containers/First_Containers.md
+  - containers/Background_Containers.md
+  #- containers/Start_And_Attach.md
+-
+  - containers/Initial_Images.md
+  - containers/Building_Images_Interactively.md
+  - containers/Building_Images_With_Dockerfiles.md
+  - containers/Cmd_And_Entrypoint.md
+  - containers/Copying_Files_During_Build.md
+  - containers/Exercise_Dockerfile_Basic.md
+-
+  - containers/Publishing_To_Docker_Hub.md
+  - containers/Dockerfile_Tips.md
+  - containers/Multi_Stage_Builds.md
+  - containers/Exercise_Dockerfile_Advanced.md
+-
+  - containers/Naming_And_Inspecting.md
+  - containers/Getting_Inside.md
+  - containers/Container_Networking_Basics.md
+  - containers/Local_Development_Workflow.md
+  - containers/Compose_For_Dev_Stacks.md
+  - containers/Exercise_Composefile.md
+  - shared/thankyou.md
+  - containers/links.md
+-
+  - |
+    # (Extra content: storage and network)
+  - containers/Working_With_Volumes.md
+  - containers/Network_Drivers.md
+  - containers/Container_Network_Model.md
+  - containers/Ambassadors.md
+-
+  - |
+    # (Extra content: ops and management)
+  - containers/Labels.md
+  - containers/Resource_Limits.md
+  - containers/Windows_Containers.md
+  - containers/Docker_Machine.md
+  - containers/Logging.md
+-
+  - |
+    # (Extra content: advanced topics)
+  - containers/Advanced_Dockerfiles.md
+  - containers/Application_Configuration.md
+  - containers/Container_Engines.md
+  - containers/Ecosystem.md
+-
+  - |
+    # (Extra content: container internals)
+  - containers/Namespaces_Cgroups.md
+  - containers/Copy_On_Write.md
+  - containers/Containers_From_Scratch.md
@@ -104,6 +104,22 @@ like Windows, macOS, Solaris, FreeBSD ...

 ---

+## rkt
+
+* Compares to `runc`.
+
+* No daemon or API.
+
+* Strong emphasis on security (through privilege separation).
+
+* Networking has to be set up separately (e.g. through CNI plugins).
+
+* Partial image management (pull, but no push).
+
+  (Image build is handled by separate tools.)
+
+---
+
 ## CRI-O

 * Designed to be used with Kubernetes as a simple, basic runtime.
@@ -82,7 +82,7 @@ CMD ["python", "app.py"]

 * Layers cannot efficiently represent a file move either.

-* As a result, operations like `chown`, `chown`, `mv` can be expensive.
+* As a result, operations like `chown`, `chmod`, `mv` can be expensive.

 * For instance, in the Dockerfile snippet below, each `RUN` line
   creates a layer with an entire copy of `some-file`.
@@ -102,44 +102,29 @@ class: extra-details

 ---

-## Docker Desktop
-
-* Special Docker edition available for Mac and Windows
-
-* Integrates well with the host OS:
-
-  * installed like normal user applications on the host
-
-  * provides a user-friendly GUI to edit Docker configuration and settings
-
-* Only support running one Docker VM at a time ...
-
----
-
-class: extra-details
-
-## Docker Desktop internals
-
-* Leverages the host OS virtualization subsystem
-
-  (e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
-
-* Under the hood, runs a tiny VM
-
-  (transparent to our daily use)
-
-* Accesses network resources like normal applications
-
-  (and therefore, plays better with enterprise VPNs and firewalls)
-
-* Supports filesystem sharing through volumes
-
-  (we'll talk about this later)
+## Docker Desktop for Mac and Docker Desktop for Windows
+
+* Special Docker Editions that integrate well with their respective host OS
+
+* Provide a user-friendly GUI to edit Docker configuration and settings
+
+* Leverage the host OS virtualization subsystem (e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
+
+* Installed like normal user applications on the host
+
+* Under the hood, they both run a tiny VM (transparent to our daily use)
+
+* Access network resources like normal applications
+  <br/>(and therefore, play better with enterprise VPNs and firewalls)
+
+* Support filesystem sharing through volumes (we'll talk about this later)
+
+* They only support running one Docker VM at a time ...
+  <br/>
+  ... but we can use `docker-machine`, the Docker Toolbox, VirtualBox, etc. to get a cluster.

 ---

 ## Running Docker on macOS and Windows

 When you execute `docker version` from the terminal:
@@ -5,7 +5,6 @@
   speaker: jpetazzo
   title: Deploying and scaling applications with Kubernetes
   attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
-  slides: https://velocity-2019-11.container.training/

 - date: 2019-11-13
   country: fr
@@ -667,12 +667,17 @@ class: extra-details

 - For auditing purposes, sometimes we want to know who can perform an action

-- There are a few tools to help us with that
-
-  - [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
-
-  - [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
-
-- Both are available as standalone programs, or as plugins for `kubectl`
-
-  (`kubectl` plugins can be installed and managed with `krew`)
+- There is a proof-of-concept tool by Aqua Security which does exactly that:
+
+  https://github.com/aquasecurity/kubectl-who-can
+
+- This is one way to install it:
+  ```bash
+  docker run --rm -v /usr/local/bin:/go/bin golang \
+    go get -v github.com/aquasecurity/kubectl-who-can
+  ```
+
+- This is one way to use it:
+  ```bash
+  kubectl-who-can create pods
+  ```
@@ -15,3 +15,26 @@
 - `dockercoins/webui:v0.1`

 - `dockercoins/worker:v0.1`
+
+---
+
+## Setting `$REGISTRY` and `$TAG`
+
+- In the upcoming exercises and labs, we use a couple of environment variables:
+
+  - `$REGISTRY` as a prefix to all image names
+
+  - `$TAG` as the image version tag
+
+- For example, the worker image is `$REGISTRY/worker:$TAG`
+
+- If you copy-paste the commands in these exercises:
+
+  **make sure that you set `$REGISTRY` and `$TAG` first!**
+
+- For example:
+  ```bash
+  export REGISTRY=dockercoins TAG=v0.1
+  ```
+
+  (this will expand `$REGISTRY/worker:$TAG` to `dockercoins/worker:v0.1`)
@@ -44,37 +44,21 @@

 ## Other things that Kubernetes can do for us

-- Autoscaling
-
-  (straightforward on CPU; more complex on other metrics)
-
-- Resource management and scheduling
-
-  (reserve CPU/RAM for containers; placement constraints)
-
-- Advanced rollout patterns
-
-  (blue/green deployment, canary deployment)
-
----
-
-## More things that Kubernetes can do for us
-
-- Batch jobs
-
-  (one-off; parallel; also cron-style periodic execution)
-
-- Fine-grained access control
-
-  (defining *what* can be done by *whom* on *which* resources)
-
-- Stateful services
-
-  (databases, message queues, etc.)
-
-- Automating complex tasks with *operators*
-
-  (e.g. database replication, failover, etc.)
+- Basic autoscaling
+
+- Blue/green deployment, canary deployment
+
+- Long running services, but also batch (one-off) jobs
+
+- Overcommit our cluster and *evict* low-priority jobs
+
+- Run services with *stateful* data (databases etc.)
+
+- Fine-grained access control defining *what* can be done by *whom* on *which* resources
+
+- Integrating third party services (*service catalog*)
+
+- Automating complex tasks (*operators*)
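The "basic autoscaling" mentioned above maps to the Horizontal Pod Autoscaler; here is a minimal sketch using a standard kubectl command (the deployment name is illustrative, and metrics-server must be available in the cluster):

```bash
# Scale the worker deployment between 1 and 10 replicas,
# targeting 50% average CPU utilization across its pods.
kubectl autoscale deployment worker --cpu-percent=50 --min=1 --max=10
```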

---

@@ -207,29 +191,11 @@ No!

 - By default, Kubernetes uses the Docker Engine to run containers

-- We can leverage other pluggable runtimes through the *Container Runtime Interface*
-
-- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
-
----
-
-class: extra-details
-
-## Some runtimes available through CRI
-
-- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
-
-  - maintained by Docker, IBM, and community
-  - used by Docker Engine, microk8s, k3s, GKE; also standalone
-  - comes with its own CLI, `ctr`
-
-- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
-
-  - maintained by Red Hat, SUSE, and community
-  - used by OpenShift and Kubic
-  - designed specifically as a minimal runtime for Kubernetes
-
-- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
+- We could also use `rkt` ("Rocket") from CoreOS
+
+- Or leverage other pluggable runtimes through the *Container Runtime Interface*
+
+  (like CRI-O, or containerd)

 ---
@@ -299,48 +265,6 @@ class: pic

 ---

-## Scaling
-
-- How would we scale the pod shown on the previous slide?
-
-- **Do** create additional pods
-
-  - each pod can be on a different node
-
-  - each pod will have its own IP address
-
-- **Do not** add more NGINX containers in the pod
-
-  - all the NGINX containers would be on the same node
-
-  - they would all have the same IP address
-    <br/>(resulting in `Address already in use` errors)
-
----
-
-## Together or separate
-
-- Should we put e.g. a web application server and a cache together?
-  <br/>
-  ("cache" being something like e.g. Memcached or Redis)
-
-- Putting them **in the same pod** means:
-
-  - they have to be scaled together
-
-  - they can communicate very efficiently over `localhost`
-
-- Putting them **in different pods** means:
-
-  - they can be scaled separately
-
-  - they must communicate over remote IP addresses
-    <br/>(incurring more latency, lower performance)
-
-- Both scenarios can make sense, depending on our goals
-
----
-
 ## Credits

 - The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
@@ -193,12 +193,7 @@

 - Best practice: set a memory limit, and pass it to the runtime

 - Note: recent versions of the JVM can do this automatically

-  (see [JDK-8146115](https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8146115))
-  and
-  [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/)
-  for detailed examples)
+  (see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)

---

@@ -481,13 +481,13 @@ docker run alpine echo hello world

 .exercise[

-- Create the file `~/.kube/config` with `kubectl`:
+- Create the file `kubeconfig.kubelet` with `kubectl`:
   ```bash
-  kubectl config \
+  kubectl --kubeconfig kubeconfig.kubelet config \
           set-cluster localhost --server http://localhost:8080
-  kubectl config \
+  kubectl --kubeconfig kubeconfig.kubelet config \
           set-context localhost --cluster localhost
-  kubectl config \
+  kubectl --kubeconfig kubeconfig.kubelet config \
           use-context localhost
   ```

@@ -495,7 +495,19 @@ docker run alpine echo hello world

 ---

-## Our `~/.kube/config` file
+## All Kubernetes clients can use `kubeconfig`
+
+- The `kubeconfig.kubelet` file has the same format as e.g. `~/.kubeconfig`
+
+- All Kubernetes clients can use a similar file
+
+- The `kubectl config` commands can be used to manipulate these files
+
+- This highlights that kubelet is a "normal" client of the API server
+
+---
+
+## Our `kubeconfig.kubelet` file

 The file that we generated looks like the one below.

@@ -521,9 +533,9 @@ clusters:

 .exercise[

-- Start kubelet with that kubeconfig file:
+- Start kubelet with that `kubeconfig.kubelet` file:
   ```bash
-  kubelet --kubeconfig ~/.kube/config
+  kubelet --kubeconfig kubeconfig.kubelet
   ```

 ]
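For reference, a kubeconfig file generated by the three `kubectl config` commands above would look roughly like this (a sketch; the exact file may also contain empty `users` and `preferences` stanzas depending on the kubectl version):

```yaml
apiVersion: v1
kind: Config
clusters:
- name: localhost
  cluster:
    server: http://localhost:8080
contexts:
- name: localhost
  context:
    cluster: localhost
current-context: localhost
```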
@@ -1,3 +1,41 @@
+## Questions to ask before adding healthchecks
+
+- Do we want liveness, readiness, both?
+
+  (sometimes, we can use the same check, but with different failure thresholds)
+
+- Do we have existing HTTP endpoints that we can use?
+
+- Do we need to add new endpoints, or perhaps use something else?
+
+- Are our healthchecks likely to use resources and/or slow down the app?
+
+- Do they depend on additional services?
+
+  (this can be particularly tricky, see next slide)
+
+---
+
+## Healthchecks and dependencies
+
+- A good healthcheck should always indicate the health of the service itself
+
+- It should not be affected by the state of the service's dependencies
+
+- Example: a web server requiring a database connection to operate
+
+  (make sure that the healthcheck can report "OK" even if the database is down;
+  <br/>
+  because it won't help us to restart the web server if the issue is with the DB!)
+
+- Example: a microservice calling other microservices
+
+- Example: a worker process
+
+  (these will generally require minor code changes to report health)
+
+---
+
 ## Adding healthchecks to an app

 - Let's add healthchecks to DockerCoins!
@@ -333,3 +371,25 @@ class: extra-details

   (and have gcr.io/pause take care of the reaping)

 - Discussion of this in [Video - 10 Ways to Shoot Yourself in the Foot with Kubernetes, #9 Will Surprise You](https://www.youtube.com/watch?v=QKI-JRs2RIE)
+
+---
+
+## Healthchecks for worker
+
+- Readiness isn't useful
+
+  (because worker isn't a backend for a service)
+
+- Liveness may help us restart a broken worker, but how can we check it?
+
+- Embedding an HTTP server is an option
+
+  (but it has a high potential for unwanted side effects and false positives)
+
+- Using a "lease" file can be relatively easy:
+
+  - touch a file during each iteration of the main loop
+
+  - check the timestamp of that file from an exec probe
+
+- Writing logs (and checking them from the probe) also works
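The lease-file idea could translate to a probe like the sketch below. This is a hypothetical example, not from the repo: the `/tmp/lease` path and the one-minute threshold are arbitrary, and the worker's main loop is assumed to `touch /tmp/lease` on every iteration.

```yaml
containers:
- name: worker
  image: dockercoins/worker:v0.1
  livenessProbe:
    exec:
      # Succeed only if /tmp/lease was modified within the last minute;
      # "grep ." turns empty find output into a non-zero exit status.
      command: ["sh", "-c", "find /tmp/lease -mmin -1 | grep ."]
    periodSeconds: 30
    failureThreshold: 3
```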
@@ -42,11 +42,9 @@

   - internal corruption (causing all requests to error)

-- Anything where our incident response would be "just restart/reboot it"
-
-.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
-
-- Otherwise we just restart our pods for no reason, creating useless load
+- If the liveness probe fails *N* consecutive times, the container is killed
+
+- *N* is the `failureThreshold` (3 by default)

 ---

@@ -54,7 +52,7 @@

 - Indicates if the container is ready to serve traffic

-- If a container becomes "unready" it might be ready again soon
+- If a container becomes "unready" (let's say busy!) it might be ready again soon

 - If the readiness probe fails:

@@ -68,79 +66,19 @@

 ## When to use a readiness probe

-- To indicate failure due to an external cause
-
-  - database is down or unreachable
-
-  - mandatory auth or other backend service unavailable
-
-- To indicate temporary failure or unavailability
-
-  - application can only service *N* parallel connections
-
-  - runtime is busy doing garbage collection or initial data load
-
-- For processes that take a long time to start
-
-  (more on that later)
-
----
-
-## Dependencies
-
-- If a web server depends on a database to function, and the database is down:
-
-  - the web server's liveness probe should succeed
-
-  - the web server's readiness probe should fail
-
-- Same thing for any hard dependency (without which the container can't work)
-
-.warning[**Do not** fail liveness probes for problems that are external to the container]
-
----
-
-## Timing and thresholds
-
-- Probes are executed at intervals of `periodSeconds` (default: 10)
-
-- The timeout for a probe is set with `timeoutSeconds` (default: 1)
-
-.warning[If a probe takes longer than that, it is considered as a FAIL]
-
-- A probe is considered successful after `successThreshold` successes (default: 1)
-
-- A probe is considered failing after `failureThreshold` failures (default: 3)
-
-- A probe can have an `initialDelaySeconds` parameter (default: 0)
-
-- Kubernetes will wait that amount of time before running the probe for the first time
-
-  (this is important to avoid killing services that take a long time to start)
-
----
-
-class: extra-details
-
-## Startup probe
-
-- Kubernetes 1.16 introduces a third type of probe: `startupProbe`
-
-  (it is in `alpha` in Kubernetes 1.16)
-
-- It can be used to indicate "container not ready *yet*"
-
-  - process is still starting
-
-  - loading external data, priming caches
-
-- Before Kubernetes 1.16, we had to use the `initialDelaySeconds` parameter
-
-  (available for both liveness and readiness probes)
-
-- `initialDelaySeconds` is a rigid delay (always wait X before running probes)
-
-- `startupProbe` works better when a container start time can vary a lot
+- To indicate temporary failures
+
+  - the application can only service *N* parallel connections
+
+  - the runtime is busy doing garbage collection or initial data load
+
+- The container is marked as "not ready" after `failureThreshold` failed attempts
+
+  (3 by default)
+
+- It is marked again as "ready" after `successThreshold` successful attempts
+
+  (1 by default)
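Putting the timing and threshold parameters together, a probe definition could look like the following sketch (the `/healthz` endpoint is a hypothetical example; the values shown are the defaults discussed above):

```yaml
readinessProbe:
  httpGet:
    path: /healthz
    port: 80
  periodSeconds: 10      # run the probe every 10 seconds
  timeoutSeconds: 1      # a slower reply counts as a failure
  failureThreshold: 3    # three failures mark the container "not ready"
  successThreshold: 1    # one success marks it "ready" again
```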
@@ -174,12 +112,10 @@ class: extra-details

   (instead of serving errors or timeouts)

-- Unavailable backends get removed from load balancer rotation
+- Overloaded backends get removed from load balancer rotation

   (thus improving response times across the board)

-- If a probe is not defined, it's as if there was an "always successful" probe

 ---

 ## Example: HTTP probe
@@ -229,56 +165,14 @@ If the Redis process becomes unresponsive, it will be killed.

 ---

-## Questions to ask before adding healthchecks
-
-- Do we want liveness, readiness, both?
-
-  (sometimes, we can use the same check, but with different failure thresholds)
-
-- Do we have existing HTTP endpoints that we can use?
-
-- Do we need to add new endpoints, or perhaps use something else?
-
-- Are our healthchecks likely to use resources and/or slow down the app?
-
-- Do they depend on additional services?
-
-  (this can be particularly tricky, see next slide)
-
----
-
-## Healthchecks and dependencies
-
-- Liveness checks should not be influenced by the state of external services
-
-- All checks should reply quickly (by default, less than 1 second)
-
-  - Otherwise, they are considered to fail
-
-  - This might require to check the health of dependencies asynchronously
-
-    (e.g. if a database or API might be healthy but still take more than
-    1 second to reply, we should check the status asynchronously and report
-    a cached status)
-
----
-
-## Healthchecks for workers
-
-(In that context, worker = process that doesn't accept connections)
-
-- Readiness isn't useful
-
-  (because workers aren't backends for a service)
-
-- Liveness may help us restart a broken worker, but how can we check it?
-
-- Embedding an HTTP server is a (potentially expensive) option
-
-- Using a "lease" file can be relatively easy:
-
-  - touch a file during each iteration of the main loop
-
-  - check the timestamp of that file from an exec probe
-
-- Writing logs (and checking them from the probe) also works
+## Details about liveness and readiness probes
+
+- Probes are executed at intervals of `periodSeconds` (default: 10)
+
+- The timeout for a probe is set with `timeoutSeconds` (default: 1)
+
+- A probe is considered successful after `successThreshold` successes (default: 1)
+
+- A probe is considered failing after `failureThreshold` failures (default: 3)
+
+- If a probe is not defined, it's as if there was an "always successful" probe
@@ -14,80 +14,42 @@

 `ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`

-- HTTP services can also use `Ingress` resources (more on that later)
-
----
-
-## `ClusterIP`
-
-- It's the default service type
-
-- A virtual IP address is allocated for the service
-
-  (in an internal, private range; e.g. 10.96.0.0/12)
-
-- This IP address is reachable only from within the cluster (nodes and pods)
-
-- Our code can connect to the service using the original port number
-
-- Perfect for internal communication, within the cluster
-
----
-
-## `LoadBalancer`
-
-- An external load balancer is allocated for the service
-
-  (typically a cloud load balancer, e.g. ELB on AWS, GLB on GCE ...)
-
-- This is available only when the underlying infrastructure provides some kind of
-  "load balancer as a service"
-
-- Each service of that type will typically cost a little bit of money
-
-  (e.g. a few cents per hour on AWS or GCE)
-
-- Ideally, traffic would flow directly from the load balancer to the pods
-
-- In practice, it will often flow through a `NodePort` first
-
----
-
-## `NodePort`
-
-- A port number is allocated for the service
-
-  (by default, in the 30000-32768 range)
-
-- That port is made available *on all our nodes* and anybody can connect to it
-
-  (we can connect to any node on that port to reach the service)
-
-- Our code needs to be changed to connect to that new port number
-
-- Under the hood: `kube-proxy` sets up a bunch of `iptables` rules on our nodes
-
-- Sometimes, it's the only available option for external traffic
-
-  (e.g. most clusters deployed with kubeadm or on-premises)
-
----
-
-class: extra-details
-
-## `ExternalName`
-
-- No load balancer (internal or external) is created
-
-- Only a DNS entry gets added to the DNS managed by Kubernetes
-
-- That DNS entry will just be a `CNAME` to a provided record
-
-Example:
-```bash
-kubectl create service externalname k8s --external-name kubernetes.io
-```
-*Creates a CNAME `k8s` pointing to `kubernetes.io`*
+---
+
+## Basic service types
+
+- `ClusterIP` (default type)
+
+  - a virtual IP address is allocated for the service (in an internal, private range)
+  - this IP address is reachable only from within the cluster (nodes and pods)
+  - our code can connect to the service using the original port number
+
+- `NodePort`
+
+  - a port is allocated for the service (by default, in the 30000-32768 range)
+  - that port is made available *on all our nodes* and anybody can connect to it
+  - our code must be changed to connect to that new port number
+
+These service types are always available.
+
+Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules.
+
+---
+
+## More service types
+
+- `LoadBalancer`
+
+  - an external load balancer is allocated for the service
+  - the load balancer is configured accordingly
+    <br/>(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port)
+  - available only when the underlying infrastructure provides some "load balancer as a service"
+    <br/>(e.g. AWS, Azure, GCE, OpenStack...)
+
+- `ExternalName`
+
+  - the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record
+  - no port, no IP address, nothing else is allocated
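As an illustration of these types, a NodePort service for the DockerCoins web UI could be declared like this sketch (roughly what `kubectl expose deploy/webui --type=NodePort --port=80`, used later in this diff, generates):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: webui
spec:
  type: NodePort
  selector:
    app: webui
  ports:
  - port: 80          # the service's cluster-internal port
    targetPort: 80    # the container port receiving the traffic
    # nodePort: 30080 # optional; auto-allocated in 30000-32768 if omitted
```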
@@ -317,28 +279,18 @@ error: the server doesn't have a resource type "endpoint"

 ---

-class: extra-details
-
-## `ExternalIP`
-
-- When creating a service, we can also specify an `ExternalIP`
-
-  (this is not a type, but an extra attribute to the service)
-
-- It will make the service available on this IP address
-
-  (if the IP address belongs to a node of the cluster)
-
----
-
-## `Ingress`
-
-- Ingresses are another type (kind) of resource
-
-- They are specifically for HTTP services
-
-  (not TCP or UDP)
-
-- They can also handle TLS certificates, URL rewriting ...
-
-- They require an *Ingress Controller* to function
+## Exposing services to the outside world
+
+- The default type (ClusterIP) only works for internal traffic
+
+- If we want to accept external traffic, we can use one of these:
+
+  - NodePort (expose a service on a TCP port between 30000-32768)
+
+  - LoadBalancer (provision a cloud load balancer for our service)
+
+  - ExternalIP (use one node's external IP address)
+
+  - Ingress (a special mechanism for HTTP services)
+
+*We'll see NodePorts and Ingresses more in detail later.*
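The Ingress option looks like the kibana example earlier in this diff; a minimal sketch using the same pre-1.19 `networking.k8s.io/v1beta1` schema that this branch targets (the host name is a placeholder):

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: webui
spec:
  rules:
  - host: webui.example.com    # placeholder host name
    http:
      paths:
      - path: /
        backend:
          serviceName: webui   # existing Service to route traffic to
          servicePort: 80
```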
@@ -20,50 +20,6 @@

 ---

-class: extra-details
-
-## `kubectl` is the new SSH
-
-- We often start managing servers with SSH
-
-  (installing packages, troubleshooting ...)
-
-- At scale, it becomes tedious, repetitive, error-prone
-
-- Instead, we use config management, central logging, etc.
-
-- In many cases, we still need SSH:
-
-  - as the underlying access method (e.g. Ansible)
-
-  - to debug tricky scenarios
-
-  - to inspect and poke at things
-
----
-
-class: extra-details
-
-## The parallel with `kubectl`
-
-- We often start managing Kubernetes clusters with `kubectl`
-
-  (deploying applications, troubleshooting ...)
-
-- At scale (with many applications or clusters), it becomes tedious, repetitive, error-prone
-
-- Instead, we use automated pipelines, observability tooling, etc.
-
-- In many cases, we still need `kubectl`:
-
-  - to debug tricky scenarios
-
-  - to inspect and poke at things
-
-- The Kubernetes API is always the underlying access method
-
----
-
 ## `kubectl get`

 - Let's look at our `Node` resources with `kubectl get`!
@@ -115,7 +71,7 @@ class: extra-details

 - Show the capacity of all our nodes as a stream of JSON objects:
   ```bash
-  kubectl get nodes -o json |
+  kubectl get nodes -o json |
         jq ".items[] | {name:.metadata.name} + .status.capacity"
   ```
@@ -226,6 +182,53 @@ class: extra-details

 ---

+## Services
+
+- A *service* is a stable endpoint to connect to "something"
+
+  (In the initial proposal, they were called "portals")
+
+.exercise[
+
+- List the services on our cluster with one of these commands:
+  ```bash
+  kubectl get services
+  kubectl get svc
+  ```
+
+]
+
+--
+
+There is already one service on our cluster: the Kubernetes API itself.
+
+---
+
+## ClusterIP services
+
+- A `ClusterIP` service is internal, available from the cluster only
+
+- This is useful for introspection from within containers
+
+.exercise[
+
+- Try to connect to the API:
+  ```bash
+  curl -k https://`10.96.0.1`
+  ```
+
+  - `-k` is used to skip certificate verification
+
+  - Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
+
+]
+
+--
+
+The error that we see is expected: the Kubernetes API requires authentication.
+
+---
+
 ## Listing running containers

 - Containers are manipulated through *pods*
@@ -464,117 +467,3 @@ class: extra-details

 [KEP-0009]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md
 [node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
-
----
-
-## Services
-
-- A *service* is a stable endpoint to connect to "something"
-
-  (In the initial proposal, they were called "portals")
-
-.exercise[
-
-- List the services on our cluster with one of these commands:
-  ```bash
-  kubectl get services
-  kubectl get svc
-  ```
-
-]
-
---
-
-There is already one service on our cluster: the Kubernetes API itself.
-
----
-
-## ClusterIP services
-
-- A `ClusterIP` service is internal, available from the cluster only
-
-- This is useful for introspection from within containers
-
-.exercise[
-
-- Try to connect to the API:
-  ```bash
-  curl -k https://`10.96.0.1`
-  ```
-
-  - `-k` is used to skip certificate verification
-
-  - Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
-
-]
-
-The command above should either time out, or show an authentication error. Why?
-
----
-
-## Time out
-
-- Connections to ClusterIP services only work *from within the cluster*
-
-- If we are outside the cluster, the `curl` command will probably time out
-
-  (Because the IP address, e.g. 10.96.0.1, isn't routed properly outside the cluster)
-
-- This is the case with most "real" Kubernetes clusters
-
-- To try the connection from within the cluster, we can use [shpod](https://github.com/jpetazzo/shpod)
-
----
-
-## Authentication error
-
-This is what we should see when connecting from within the cluster:
-```json
-$ curl -k https://10.96.0.1
-{
-  "kind": "Status",
-  "apiVersion": "v1",
-  "metadata": {
-
-  },
-  "status": "Failure",
-  "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
-  "reason": "Forbidden",
-  "details": {
-
-  },
-  "code": 403
-}
-```
-
----
-
-## Explanations
-
-- We can see `kind`, `apiVersion`, `metadata`
-
-- These are typical of a Kubernetes API reply
-
-- Because we *are* talking to the Kubernetes API
-
-- The Kubernetes API tells us "Forbidden"
-
-  (because it requires authentication)
-
-- The Kubernetes API is reachable from within the cluster
-
-  (many apps integrating with Kubernetes will use this)
-
----
-
-## DNS integration
-
-- Each service also gets a DNS record
-
-- The Kubernetes DNS resolver is available *from within pods*
-
-  (and sometimes, from within nodes, depending on configuration)
-
-- Code running in pods can connect to services using their name
-
-  (e.g. https://kubernetes/...)
@@ -153,7 +153,10 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
   kubectl logs deploy/pingpong --tail 1 --follow
   ```

-- Leave that command running, so that we can keep an eye on these logs
+<!--
+```wait seq=3```
+```keys ^C```
+-->

 ]
@@ -183,44 +186,6 @@ We could! But the *deployment* would notice it right away, and scale back to the

 ---

-## Log streaming
-
-- Let's look again at the output of `kubectl logs`
-
-  (the one we started before scaling up)
-
-- `kubectl logs` shows us one line per second
-
-- We could expect 3 lines per second
-
-  (since we should now have 3 pods running `ping`)
-
-- Let's try to figure out what's happening!
-
----
-
-## Streaming logs of multiple pods
-
-- What happens if we restart `kubectl logs`?
-
-.exercise[
-
-- Interrupt `kubectl logs` (with Ctrl-C)
-
-- Restart it:
-  ```bash
-  kubectl logs deploy/pingpong --tail 1 --follow
-  ```
-
-]
-
-`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
-
-Let's leave `kubectl logs` running while we keep exploring.
-
----
-
 ## Resilience

 - The *deployment* `pingpong` watches its *replica set*
@@ -231,12 +196,20 @@ Let's leave `kubectl logs` running while we keep exploring.

 .exercise[

-- In a separate window, watch the list of pods:
+- In a separate window, list pods, and keep watching them:
   ```bash
-  watch kubectl get pods
+  kubectl get pods -w
   ```

-- Destroy the pod currently shown by `kubectl logs`:
+<!--
+```wait Running```
+```keys ^C```
+```hide kubectl wait deploy pingpong --for condition=available```
+```keys kubectl delete pod ping```
+```copypaste pong-..........-.....```
+-->
+
+- Destroy a pod:
+  ```
+  kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
+  ```
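The graceful deletion that follows can also be tuned; these are standard kubectl flags, shown here as a hypothetical example with the same placeholder pod name:

```bash
# Give the pod only 5 seconds to shut down after SIGTERM...
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy --grace-period=5

# ...or skip the grace period entirely (force kill).
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy --grace-period=0 --force
```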
@@ -244,23 +217,6 @@ Let's leave `kubectl logs` running while we keep exploring.

 ---

-## What happened?
-
-- `kubectl delete pod` terminates the pod gracefully
-
-  (sending it the TERM signal and waiting for it to shutdown)
-
-- As soon as the pod is in "Terminating" state, the Replica Set replaces it
-
-- But we can still see the output of the "Terminating" pod in `kubectl logs`
-
-- Until 30 seconds later, when the grace period expires
-
-- The pod is then killed, and `kubectl logs` exits
-
----
-
 ## What if we wanted something different?

 - What if we wanted to start a "one-shot" container that *doesn't* get restarted?
@@ -278,72 +234,6 @@ Let's leave `kubectl logs` running while we keep exploring.

 ---

-## Scheduling periodic background work
-
-- A Cron Job is a job that will be executed at specific intervals
-
-  (the name comes from the traditional cronjobs executed by the UNIX crond)
-
-- It requires a *schedule*, represented as five space-separated fields:
-
-  - minute [0,59]
-  - hour [0,23]
-  - day of the month [1,31]
-  - month of the year [1,12]
-  - day of the week ([0,6] with 0=Sunday)
-
-- `*` means "all valid values"; `/N` means "every N"
-
-- Example: `*/3 * * * *` means "every three minutes"
-
----
-
-## Creating a Cron Job
-
-- Let's create a simple job to be executed every three minutes
-
-- Cron Jobs need to terminate, otherwise they'd run forever
-
-.exercise[
-
-- Create the Cron Job:
-  ```bash
-  kubectl run --schedule="*/3 * * * *" --restart=OnFailure --image=alpine sleep 10
-  ```
-
-- Check the resource that was created:
-  ```bash
-  kubectl get cronjobs
-  ```
-
-]
-
----
-
-## Cron Jobs in action
-
-- At the specified schedule, the Cron Job will create a Job
-
-- The Job will create a Pod
-
-- The Job will make sure that the Pod completes
-
-  (re-creating another one if it fails, for instance if its node fails)
-
-.exercise[
-
-- Check the Jobs that are created:
-  ```bash
-  kubectl get jobs
-  ```
-
-]
-
-(It will take a few minutes before the first job is scheduled.)
-
----
-
 ## What about that deprecation warning?

 - As we can see from the previous slide, `kubectl run` can do many things
@@ -34,11 +34,11 @@

 - Download the `kubectl` binary from one of these links:

-  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/linux/amd64/kubectl)
+  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.15.5/bin/linux/amd64/kubectl)
   |
-  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/darwin/amd64/kubectl)
+  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.15.5/bin/darwin/amd64/kubectl)
   |
-  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.15.3/bin/windows/amd64/kubectl.exe)
+  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.15.5/bin/windows/amd64/kubectl.exe)

 - On Linux and macOS, make the binary executable with `chmod +x kubectl`
@@ -66,8 +66,6 @@ Exactly what we need!
   sudo chmod +x /usr/local/bin/stern
   ```

-- On OS X, just `brew install stern`
-
 <!-- ##VERSION## -->

 ---

@@ -218,18 +218,6 @@ class: extra-details

-## What's going on?
-
-- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
-
-- It lets the container engine use a default network
-
-  (in that case, we end up with the default Docker bridge)
-
-- Our pods are running on independent, disconnected, host-local networks
-
----
-
 ## What do we need to do?

 - On a normal cluster, kubelet is configured to set up pod networking with CNI plugins

 - This requires:
@@ -240,6 +228,14 @@ class: extra-details

   - running kubelet with `--network-plugin=cni`

+- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
+
+- It lets the container engine use a default network
+
+  (in that case, we end up with the default Docker bridge)
+
+- Our pods are running on independent, disconnected, host-local networks
+
 ---

 ## Using network plugins
@@ -398,7 +394,7 @@ class: extra-details

 - Start kube-proxy:
   ```bash
-  sudo kube-proxy --kubeconfig ~/.kube/config
+  sudo kube-proxy --kubeconfig ~/kubeconfig
   ```

 - Expose our Deployment:
@@ -11,36 +11,16 @@

 - Deploy everything else:
   ```bash
-  kubectl create deployment hasher --image=dockercoins/hasher:v0.1
-  kubectl create deployment rng --image=dockercoins/rng:v0.1
-  kubectl create deployment webui --image=dockercoins/webui:v0.1
-  kubectl create deployment worker --image=dockercoins/worker:v0.1
+  set -u
+  for SERVICE in hasher rng webui worker; do
+    kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
+  done
   ```

 ]

 ---

-class: extra-details
-
-## Deploying other images
-
-- If we wanted to deploy images from another registry ...
-
-- ... Or with a different tag ...
-
-- ... We could use the following snippet:
-
-  ```bash
-  REGISTRY=dockercoins
-  TAG=v0.1
-  for SERVICE in hasher rng webui worker; do
-    kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
-  done
-  ```
-
----
-
 ## Is this working?

 - After waiting for the deployment to complete, let's look at the logs!
@@ -131,7 +111,7 @@ We should now see the `worker`, well, working happily.

 - Create a `NodePort` service for the Web UI:
   ```bash
-  kubectl expose deploy/webui --type=`NodePort` --port=80
+  kubectl expose deploy/webui --type=NodePort --port=80
   ```

 - Check the port that was allocated:
@@ -141,8 +121,6 @@ We should now see the `worker`, well, working happily.

 ]

-.warning[On PKS, replace `NodePort` with `LoadBalancer`.]
-
 ---

 ## Accessing the web UI
@@ -155,14 +133,8 @@ We should now see the `worker`, well, working happily.

 <!-- ```open http://node1:3xxxx/``` -->

-- On PKS, you will have to use the EXTERNAL-IP shown on the `webui` line
-
-  (and you can connect to port 80, yay!)
-
 ]

 --

 Yes, this may take a little while to update. *(Narrator: it was DNS.)*
|
||||
|
||||
@@ -240,25 +240,6 @@ If you want to use an external key/value store, add one of the following:

---

## Check our default Storage Class

- The YAML manifest applied earlier should define a default storage class

.exercise[

- Check that we have a default storage class:
  ```bash
  kubectl get storageclass
  ```

]

There should be a storage class showing as `portworx-replicated (default)`.

---

class: extra-details

## Our default Storage Class

This is our Storage Class (in `k8s/storage-class.yaml`):

@@ -284,6 +265,28 @@ parameters:

---

## Creating our Storage Class

- Let's apply that YAML file!

.exercise[

- Create the Storage Class:
  ```bash
  kubectl apply -f ~/container.training/k8s/storage-class.yaml
  ```

- Check that it is now available:
  ```bash
  kubectl get sc
  ```

]

It should show as `portworx-replicated (default)`.

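As an aside, the "(default)" marker comes from an annotation on the Storage Class; if we ever need to set it ourselves, a minimal sketch (using the class name above; the command is standard kubectl):

```bash
# hypothetical: mark an existing storage class as the cluster default
kubectl annotate storageclass portworx-replicated \
  storageclass.kubernetes.io/is-default-class=true --overwrite
```
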
---

## Our Postgres Stateful Set

- The next slide shows `k8s/postgres.yaml`

@@ -323,7 +326,7 @@ spec:
      schedulerName: stork
      containers:
      - name: postgres
        image: postgres:11
        image: postgres:10.5
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: postgres

@@ -60,11 +60,9 @@

  (by default: every minute; can be more/less frequent)

- The list of URLs to scrape (the *scrape targets*) is defined in configuration
- If you're worried about parsing overhead: exporters can also use protobuf

.footnote[Worried about the overhead of parsing a text format?
<br/>
Check this [comparison](https://github.com/RichiH/OpenMetrics/blob/master/markdown/protobuf_vs_text.md) of the text format with the (now deprecated) protobuf format!]
- The list of URLs to scrape (the *scrape targets*) is defined in configuration
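To make "defined in configuration" concrete, here is a minimal sketch of a Prometheus configuration with one static scrape target (the job name and target address are illustrative):

```bash
# a minimal, hypothetical prometheus.yml with a single static target
cat <<'EOF' > prometheus.yml
global:
  scrape_interval: 1m
scrape_configs:
- job_name: node
  static_configs:
  - targets: [ 'localhost:9100' ]
EOF
```
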
---

@@ -14,27 +14,7 @@

## Rolling updates

- With rolling updates, when a Deployment is updated, it happens progressively

- The Deployment controls multiple Replica Sets

- Each Replica Set is a group of identical Pods

  (with the same image, arguments, parameters ...)

- During the rolling update, we have at least two Replica Sets:

  - the "new" set (corresponding to the "target" version)

  - at least one "old" set

- We can have multiple "old" sets

  (if we start another update before the first one is done)

---

## Update strategy
- With rolling updates, when a resource is updated, it happens progressively

- Two parameters determine the pace of the rollout: `maxUnavailable` and `maxSurge`
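As a sketch, here is how these two knobs could be tuned on our `worker` Deployment (the values are illustrative, not the deck's defaults):

```bash
# hypothetical example: make the rollout fully "surge-based"
# (never go below the desired replica count; add at most 1 extra pod)
kubectl patch deployment worker -p '{"spec":{"strategy":{"rollingUpdate":{"maxUnavailable":0,"maxSurge":1}}}}'
```
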
@@ -81,6 +61,32 @@

---

## Building a new version of the `worker` service

.warning[
Only run these commands if you have built and pushed DockerCoins to a local registry.
<br/>
If you are using images from the Docker Hub (`dockercoins/worker:v0.1`), skip this.
]

.exercise[

- Go to the `stacks` directory (`~/container.training/stacks`)

- Edit `dockercoins/worker/worker.py`; update the first `sleep` line to sleep 1 second

- Build a new tag and push it to the registry:
  ```bash
  #export REGISTRY=localhost:3xxxx
  export TAG=v0.2
  docker-compose -f dockercoins.yml build
  docker-compose -f dockercoins.yml push
  ```

]

---

## Rolling out the new `worker` service

.exercise[

@@ -99,7 +105,7 @@

- Update `worker` either with `kubectl edit`, or by running:
  ```bash
  kubectl set image deploy worker worker=dockercoins/worker:v0.2
  kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
  ```

]

@@ -140,7 +146,8 @@ That rollout should be pretty quick. What shows in the web UI?

- Update `worker` by specifying a non-existent image:
  ```bash
  kubectl set image deploy worker worker=dockercoins/worker:v0.3
  export TAG=v0.3
  kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
  ```

- Check what's going on:

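One way to watch what the rollout is doing (standard commands; the original exercise may use different ones):

```bash
kubectl rollout status deployment worker   # follows the rollout progress
kubectl get pods -l app=worker -w          # watches the pods come and go
```
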
@@ -209,14 +216,27 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.

.exercise[

- Connect to the dashboard that we deployed earlier

- Check that we have failures in Deployments, Pods, and Replica Sets

- Can we see the reason for the failure?
- Check which port the dashboard is on:
  ```bash
  kubectl -n kube-system get svc socat
  ```

]

Note the `3xxxx` port.

.exercise[

- Connect to http://oneofournodes:3xxxx/

<!-- ```open https://node1:3xxxx/``` -->

]

--

- We have failures in Deployments, Pods, and Replica Sets

---

## Recovering from a bad rollout

@@ -245,137 +265,6 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.

---

## Rolling back to an older version

- We reverted to `v0.2`

- But this version still has a performance problem

- How can we get back to the previous version?

---

## Multiple "undos"

- What happens if we try `kubectl rollout undo` again?

.exercise[

- Try it:
  ```bash
  kubectl rollout undo deployment worker
  ```

- Check the web UI, the list of pods ...

]

🤔 That didn't work.

---

## Multiple "undos" don't work

- If we see successive versions as a stack:

  - `kubectl rollout undo` doesn't "pop" the last element from the stack

  - it copies the (N-1)th element to the top

- Multiple "undos" just swap back and forth between the last two versions!

.exercise[

- Go back to v0.2 again:
  ```bash
  kubectl rollout undo deployment worker
  ```

]

---

## In this specific scenario

- Our version numbers are easy to guess

- What if we had used git hashes?

- What if we had changed other parameters in the Pod spec?

---

## Listing versions

- We can list successive versions of a Deployment with `kubectl rollout history`

.exercise[

- Look at our successive versions:
  ```bash
  kubectl rollout history deployment worker
  ```

]

We don't see *all* revisions.

We might see something like 1, 4, 5.

(Depending on how many "undos" we did before.)

---

## Explaining deployment revisions

- These revisions correspond to our Replica Sets

- This information is stored in the Replica Set annotations

.exercise[

- Check the annotations for our replica sets:
  ```bash
  kubectl describe replicasets -l app=worker | grep -A3 ^Annotations
  ```

]

---

class: extra-details

## What about the missing revisions?

- The missing revisions are stored in another annotation:

  `deployment.kubernetes.io/revision-history`

- These are not shown in `kubectl rollout history`

- We could easily reconstruct the full list with a script

  (if we wanted to!)

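A sketch of such a script, reading both annotations straight from the Replica Sets (output columns are illustrative; the label selector matches our `worker` app):

```bash
# list each Replica Set with its current revision and its revision history
kubectl get replicasets -l app=worker \
  -o custom-columns='NAME:.metadata.name,REVISION:.metadata.annotations.deployment\.kubernetes\.io/revision,HISTORY:.metadata.annotations.deployment\.kubernetes\.io/revision-history'
```
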
---

## Rolling back to an older version

- `kubectl rollout undo` can work with a revision number

.exercise[

- Roll back to the "known good" deployment version:
  ```bash
  kubectl rollout undo deployment worker --to-revision=1
  ```

- Check the web UI or the list of pods

]

---

class: extra-details

## Changing rollout parameters

@@ -396,7 +285,7 @@ spec:
      spec:
        containers:
        - name: worker
          image: dockercoins/worker:v0.1
          image: $REGISTRY/worker:v0.1
  strategy:
    rollingUpdate:
      maxUnavailable: 0
@@ -427,7 +316,7 @@ class: extra-details
      spec:
        containers:
        - name: worker
          image: dockercoins/worker:v0.1
          image: $REGISTRY/worker:v0.1
  strategy:
    rollingUpdate:
      maxUnavailable: 0

@@ -61,8 +61,7 @@

- [minikube](https://kubernetes.io/docs/setup/minikube/),
  [kubespawn](https://github.com/kinvolk/kube-spawn),
  [Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/),
  [kind](https://kind.sigs.k8s.io):
  [Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/):
  for local development

- [kubicorn](https://github.com/kubicorn/kubicorn),

@@ -1,6 +1,6 @@
## Versions installed

- Kubernetes 1.15.3
- Kubernetes 1.15.5
- Docker Engine 19.03.1
- Docker Compose 1.24.1

@@ -66,87 +66,7 @@ class: extra-details

---

## Adding a volume to a Pod

- We will start with the simplest Pod manifest we can find

- We will add a volume to that Pod manifest

- We will mount that volume in a container in the Pod

- By default, this volume will be an `emptyDir`

  (an empty directory)

- It will "shadow" the directory where it's mounted

---

## Our basic Pod

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-without-volume
spec:
  containers:
  - name: nginx
    image: nginx
```

This is an MVP! (Minimum Viable Pod 😉)

It runs a single NGINX container.

---

## Trying the basic pod

.exercise[

- Create the Pod:
  ```bash
  kubectl create -f ~/container.training/k8s/nginx-1-without-volume.yaml
  ```

- Get its IP address:
  ```bash
  IPADDR=$(kubectl get pod nginx-without-volume -o jsonpath={.status.podIP})
  ```

- Send a request with curl:
  ```bash
  curl $IPADDR
  ```

]

(We should see the "Welcome to NGINX" page.)

---

## Adding a volume

- We need to add the volume in two places:

  - at the Pod level (to declare the volume)

  - at the container level (to mount the volume)

- We will declare a volume named `www`

- No type is specified, so it will default to `emptyDir`

  (as the name implies, it will be initialized as an empty directory at pod creation)

- In that pod, there is also a container named `nginx`

- That container mounts the volume `www` to path `/usr/share/nginx/html/`

---

## The Pod with a volume
## A simple volume example

```yaml
apiVersion: v1
@@ -166,57 +86,30 @@ spec:

---

## Trying the Pod with a volume
## A simple volume example, explained

.exercise[
- We define a standalone `Pod` named `nginx-with-volume`

- Create the Pod:
  ```bash
  kubectl create -f ~/container.training/k8s/nginx-2-with-volume.yaml
  ```
- In that pod, there is a volume named `www`

- Get its IP address:
  ```bash
  IPADDR=$(kubectl get pod nginx-with-volume -o jsonpath={.status.podIP})
  ```
- No type is specified, so it will default to `emptyDir`

- Send a request with curl:
  ```bash
  curl $IPADDR
  ```
  (as the name implies, it will be initialized as an empty directory at pod creation)

]
- In that pod, there is also a container named `nginx`

(We should now see a "403 Forbidden" error page.)
- That container mounts the volume `www` to path `/usr/share/nginx/html/`

---

## Populating the volume with another container

- Let's add another container to the Pod

- Let's mount the volume in *both* containers

- That container will populate the volume with static files

- NGINX will then serve these static files

- To populate the volume, we will clone the Spoon-Knife repository

  - this repository is https://github.com/octocat/Spoon-Knife

  - it's very popular (more than 100K stars!)

---

## Sharing a volume between two containers
## A volume shared between two containers

.small[
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-git
  name: nginx-with-volume
spec:
  volumes:
  - name: www
@@ -254,72 +147,30 @@ spec:

---

## Trying the shared volume
## Sharing a volume, in action

- This one will be time-sensitive!

- We need to catch the Pod IP address *as soon as it's created*

- Then send a request to it *as fast as possible*
- Let's try it!

.exercise[

- Watch the pods (so that we can catch the Pod IP address)
- Create the pod by applying the YAML file:
  ```bash
  kubectl get pods -o wide --watch
  kubectl apply -f ~/container.training/k8s/nginx-with-volume.yaml
  ```

]

---

## Shared volume in action

.exercise[

- Create the pod:
- Check the IP address that was allocated to our pod:
  ```bash
  kubectl create -f ~/container.training/k8s/nginx-3-with-git.yaml
  kubectl get pod nginx-with-volume -o wide
  IP=$(kubectl get pod nginx-with-volume -o json | jq -r .status.podIP)
  ```

- As soon as we see its IP address, access it:
  ```bash
  curl $IP
  ```

- A few seconds later, the state of the pod will change; access it again:
- Access the web server:
  ```bash
  curl $IP
  ```

]

The first time, we should see "403 Forbidden".

The second time, we should see the HTML file from the Spoon-Knife repository.

---

## Explanations

- Both containers are started at the same time

- NGINX starts very quickly

  (it can serve requests immediately)

- But at this point, the volume is empty

  (NGINX serves "403 Forbidden")

- The other container installs git and clones the repository

  (this takes a bit longer)

- When the other container is done, the volume holds the repository

  (NGINX serves the HTML file)

---

## The devil is in the details

@@ -332,100 +183,13 @@ The second time, we should see the HTML file from the Spoon-Knife repository.

- That's why we specified `restartPolicy: OnFailure`

---

## Inconsistencies

- There is a short period of time during which the website is not available

  (because the `git` container hasn't done its job yet)

- With a bigger website, we could get inconsistent results
- This could be avoided by using [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)

  (where only a part of the content is ready)

- In real applications, this could cause incorrect results

- How can we avoid that?

---

## Init Containers

- We can define containers that should execute *before* the main ones

- They will be executed in order

  (instead of in parallel)

- They must all succeed before the main containers are started

- This is *exactly* what we need here!

- Let's see one in action

.footnote[See [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) documentation for all the details.]

---

## Defining Init Containers

.small[
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-init
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  initContainers:
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
```
]

---

## Trying the init container

- Repeat the same operation as earlier

  (try to send HTTP requests as soon as the pod comes up)

- This time, instead of "403 Forbidden" we get a "connection refused"

- NGINX doesn't start until the git container has done its job

- We never get inconsistent results

  (a "half-ready" container)

---

## Other uses of init containers

- Load content

- Generate configuration (or certificates)

- Database migrations

- Waiting for other services to be up, as sketched below

  (to avoid a flurry of connection errors in the main container)

- etc.
  (we will see a live example in a few sections)

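For the "waiting" pattern, a minimal sketch — the service name `redis`, the pod name, and the images are illustrative; the loop is the DNS-based wait used in the Kubernetes documentation:

```bash
# hypothetical example: block the main container until the "redis" Service resolves
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: wait-for-redis-demo
spec:
  initContainers:
  - name: wait-for-redis
    image: busybox
    command: [ "sh", "-c", "until nslookup redis; do echo waiting for redis; sleep 2; done" ]
  containers:
  - name: app
    image: alpine
    command: [ "sleep", "3600" ]
EOF
```
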
---

@@ -20,9 +20,10 @@ And *then* it is time to look at orchestration!

---

## Options for our first production cluster

- Use a managed cluster (AKS, EKS, GKE, PKS...)
- Get a managed cluster from a major cloud provider (AKS, EKS, GKE...)

  (price: $, difficulty: medium)

@@ -206,157 +207,59 @@ And *then* it is time to look at orchestration!

---

## Congratulations!
## Managing stack deployments

- We learned a lot about Kubernetes, its internals, its advanced concepts
- The best deployment tool will vary, depending on:

  - the size and complexity of your stack(s)
  - how often you change it (i.e. add/remove components)
  - the size and skills of your team

- A few examples:

  - shell scripts invoking `kubectl`
  - YAML resource descriptions committed to a repo
  - [Helm](https://github.com/kubernetes/helm) (~package manager)
  - [Spinnaker](https://www.spinnaker.io/) (Netflix' CD platform)
  - [Brigade](https://brigade.sh/) (event-driven scripting; no YAML)

---

## Cluster federation

--

- That was just the easy part

- The hard challenges will revolve around *culture* and *people*



--

- ... What does that mean?
Sorry Star Trek fans, this is not the federation you're looking for!

--

(If I add "Your cluster is in another federation" I might get a 3rd fandom wincing!)

---

## Running an app involves many steps
## Cluster federation

- Write the app
- Kubernetes master operation relies on etcd

- Tests, QA ...
- etcd uses the [Raft](https://raft.github.io/) protocol

- Ship *something* (more on that later)
- Raft recommends low latency between nodes

- Provision resources (e.g. VMs, clusters)
- What if our cluster spreads to multiple regions?

- Deploy the *something* on the resources
--

- Manage, maintain, monitor the resources
- Break it down in local clusters

- Manage, maintain, monitor the app
- Regroup them in a *cluster federation*

- And much more
- Synchronize resources across clusters

---

## Who does what?

- The old "devs vs ops" division has changed

- In some organizations, "ops" are now called "SRE" or "platform" teams

  (and they have very different sets of skills)

- Do you know which team is responsible for each item on the list on the previous page?

- Acknowledge that a lot of tasks are outsourced

  (e.g. if we add "buy/rack/provision machines" in that list)

---

## What do we ship?

- Some organizations embrace "you build it, you run it"

- When "build" and "run" are owned by different teams, where's the line?

- What does the "build" team ship to the "run" team?

- Let's see a few options, and what they imply

---

## Shipping code

- Team "build" ships code

  (hopefully in a repository, identified by a commit hash)

- Team "run" containerizes that code

✔️ no extra work for developers

❌ very little advantage of using containers

---

## Shipping container images

- Team "build" ships container images

  (hopefully built automatically from a source repository)

- Team "run" uses these images to create e.g. Kubernetes resources

✔️ universal artefact (supports all languages uniformly)

✔️ easy to start a single component (good for monoliths)

❌ complex applications will require a lot of extra work

❌ adding/removing components in the stack also requires extra work

❌ complex applications will run very differently between dev and prod

---

## Shipping Compose files

(Or another kind of dev-centric manifest)

- Team "build" ships a manifest that works on a single node

  (as well as images, or ways to build them)

- Team "run" adapts that manifest to work on a cluster

✔️ all teams can start the stack in a reliable, deterministic manner

❌ adding/removing components still requires *some* work (but less than before)

❌ there will be *some* differences between dev and prod

---

## Shipping Kubernetes manifests

- Team "build" ships ready-to-run manifests

  (YAML, Helm charts, Kustomize ...)

- Team "run" adjusts some parameters and monitors the application

✔️ parity between dev and prod environments

✔️ the "run" team can focus on SLAs, SLOs, and overall quality

❌ requires *a lot* of extra work (and new skills) from the "build" team

❌ Kubernetes is not a very convenient development platform (at least, not yet)

---

## What's the right answer?

- It depends on our teams

  - existing skills (do they know how to do it?)

  - availability (do they have the time to do it?)

  - potential skills (can they learn to do it?)

- It depends on our culture

  - owning "run" often implies being on call

  - do we reward on-call duty without encouraging hero syndrome?

  - do we give people resources (time, money) to learn?
- Discover resources across clusters

---

@@ -1,93 +0,0 @@
# Deploying with YAML

- So far, we created resources with the following commands:

  - `kubectl run`

  - `kubectl create deployment`

  - `kubectl expose`

- We can also create resources directly with YAML manifests

---

## `kubectl apply` vs `create`

- `kubectl create -f whatever.yaml`

  - creates resources if they don't exist

  - if resources already exist, doesn't alter them
    <br/>(and displays an error message)

- `kubectl apply -f whatever.yaml`

  - creates resources if they don't exist

  - if resources already exist, updates them
    <br/>(to match the definition provided by the YAML file)

  - stores the manifest as an *annotation* in the resource
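The difference is easy to see on any manifest; a quick sketch (the file name `web.yaml` is illustrative):

```bash
kubectl create -f web.yaml   # first run: the resources get created
kubectl create -f web.yaml   # second run: fails with "AlreadyExists" errors
kubectl apply -f web.yaml    # updates the live resources to match the file
```
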
---

## Creating multiple resources

- The manifest can contain multiple resources separated by `---`

  ```yaml
  kind: ...
  apiVersion: ...
  metadata: ...
    name: ...
  ...
  ---
  kind: ...
  apiVersion: ...
  metadata: ...
    name: ...
  ...
  ```

---

## Creating multiple resources

- The manifest can also contain a list of resources

  ```yaml
  apiVersion: v1
  kind: List
  items:
  - kind: ...
    apiVersion: ...
    ...
  - kind: ...
    apiVersion: ...
    ...
  ```

---

## Deploying dockercoins with YAML

- We provide a YAML manifest with all the resources for Dockercoins

  (Deployments and Services)

- We can use it if we need to deploy or redeploy Dockercoins

.exercise[

- Deploy or redeploy Dockercoins:
  ```bash
  kubectl apply -f ~/container.training/k8s/dockercoins.yaml
  ```

]

(If we deployed Dockercoins earlier, we will see warning messages,
because the resources that we created lack the necessary annotation.
We can safely ignore them.)

@@ -1,113 +0,0 @@
title: |
  Kubernetes Meetup
  @
  VMware

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://vmware-2019-11.container.training/

#slidenumberprefix: "#SomeHashTag — "

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
-
  - shared/prereqs.md
  #- shared/webssh.md
  - shared/connecting.md
  #- k8s/versions-k8s.md
  - shared/sampleapp.md
  #- shared/composescale.md
  #- shared/hastyconclusions.md
  #- shared/composedown.md
  - k8s/concepts-k8s.md
  - k8s/kubectlget.md
-
  - k8s/kubectlrun.md
  - k8s/logs-cli.md
  - vmware/vrli.md
  - shared/declarative.md
  - k8s/declarative.md
  - k8s/deploymentslideshow.md
  - k8s/kubenet.md
  - k8s/kubectlexpose.md
  - vmware/nsxt.md
  - k8s/shippingimages.md
  #- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
-
  - k8s/yamldeploy.md
  - k8s/setup-k8s.md
  - vmware/pks.md
  #- k8s/dashboard.md
  #- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - |
    ## Scaling `rng`

    - Let's scale the `rng` service just like we scaled `worker`

    .exercise[

    - Scale `rng`:
      ```bash
      kubectl scale deploy rng --replicas=2
      ```

    ]

    The web UI graph should go past 10 hashes/second.
  - vmware/vrops.md
  #- shared/hastyconclusions.md
  #- k8s/daemonset.md
  #- k8s/dryrun.md
  #- k8s/kubectlproxy.md
  #- k8s/localkubeconfig.md
  #- k8s/accessinternal.md
  - k8s/rollout.md
  #- k8s/healthchecks.md
  #- k8s/healthchecks-more.md
  #- k8s/record.md
  - k8s/namespaces.md
-
  #- k8s/ingress.md
  #- k8s/kustomize.md
  #- k8s/helm.md
  #- k8s/create-chart.md
  #- k8s/netpol.md
  #- k8s/authn-authz.md
  #- k8s/csr-api.md
  #- k8s/openid-connect.md
  #- k8s/podsecuritypolicy.md
  - k8s/volumes.md
  #- k8s/build-with-docker.md
  #- k8s/build-with-kaniko.md
  - k8s/configuration.md
  #- k8s/logs-centralized.md
  #- k8s/prometheus.md
  #- k8s/statefulsets.md
  #- k8s/local-persistent-volumes.md
  - k8s/portworx.md
  #- k8s/extending-api.md
  #- k8s/operators.md
  #- k8s/operators-design.md
  #- k8s/staticpods.md
  #- k8s/owners-and-dependents.md
  #- k8s/gitworkflows.md
  - vmware/vsan.md
  - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md
@@ -1,23 +1,19 @@
title: |
  Deploying and Scaling Microservices
  with Docker and Kubernetes

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
  Kubernetes
  Developer Training

chat: "(None)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

#slidenumberprefix: "#SomeHashTag — "
slides: http://clt-2019-10.container.training/

exclude:
- in-person
- self-paced

chapters:
- shared/title.md
#- logistics.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
@@ -41,22 +37,21 @@ chapters:
  - k8s/kubenet.md
  - k8s/kubectlexpose.md
  - k8s/shippingimages.md
  - k8s/buildshiprun-selfhosted.md
  #- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
-
  - k8s/yamldeploy.md
  - k8s/kubectlproxy.md
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
  - k8s/setup-k8s.md
  - k8s/dashboard.md
  #- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
  - k8s/daemonset.md
  - k8s/dryrun.md
  - k8s/kubectlproxy.md
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
-
  - k8s/dryrun.md
  - k8s/rollout.md
  - k8s/healthchecks.md
  - k8s/healthchecks-more.md
@@ -70,29 +65,26 @@ chapters:
-
  - k8s/netpol.md
  - k8s/authn-authz.md
-
  - k8s/csr-api.md
  - k8s/openid-connect.md
  - k8s/podsecuritypolicy.md
  #- k8s/csr-api.md
  #- k8s/openid-connect.md
  #- k8s/podsecuritypolicy.md
-
  - k8s/volumes.md
  - k8s/build-with-docker.md
  - k8s/build-with-kaniko.md
  #- k8s/build-with-docker.md
  #- k8s/build-with-kaniko.md
  - k8s/configuration.md
-
  - k8s/logs-centralized.md
  - k8s/prometheus.md
-
  - k8s/statefulsets.md
  - k8s/local-persistent-volumes.md
  - k8s/portworx.md
-
  - k8s/extending-api.md
  - k8s/operators.md
  - k8s/operators-design.md
  - k8s/staticpods.md
  - k8s/owners-and-dependents.md
  - k8s/gitworkflows.md
  #- k8s/extending-api.md
  #- k8s/operators.md
  #- k8s/operators-design.md
  #- k8s/staticpods.md
  #- k8s/owners-and-dependents.md
  #- k8s/gitworkflows.md
-
  - k8s/whatsnext.md
  - k8s/links.md
@@ -1,10 +1,12 @@
## Intros

- Hello! We are:
- Hello! I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))

  - Brice ([@bdereims](https://twitter.com/bdereims), VMware)
- The training will run from 9am to 5pm

  - Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix)
- There will be a lunch break

  (And coffee breaks!)

- Feel free to interrupt for questions at any time

@@ -80,7 +80,7 @@ def flatten(titles):


def generatefromyaml(manifest, filename):
    manifest = yaml.safe_load(manifest)
    manifest = yaml.load(manifest)

    markdown, titles = processchapter(manifest["chapters"], filename)
    logging.debug("Found {} titles.".format(len(titles)))
@@ -111,7 +111,6 @@ def generatefromyaml(manifest, filename):
    html = html.replace("@@GITREPO@@", manifest["gitrepo"])
    html = html.replace("@@SLIDES@@", manifest["slides"])
    html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
    html = html.replace("@@SLIDENUMBERPREFIX@@", manifest.get("slidenumberprefix", ""))
    return html

@@ -4,12 +4,7 @@ class: in-person

.exercise[

- Log into the first VM (`node1`) with your SSH client:
  ```bash
  ssh `user`@`A.B.C.D`
  ```

  (Replace `user` and `A.B.C.D` with the user and IP address provided to you)
- Log into the first VM (`node1`) with your SSH client

<!--
```bash
@@ -23,13 +18,16 @@ done
```
-->

- Check that you can SSH (without password) to `node2`:
  ```bash
  ssh node2
  ```
- Type `exit` or `^D` to come back to `node1`

<!-- ```bash exit``` -->

]

You should see a prompt looking like this:
```
[A.B.C.D] (...) user@node1 ~
$
```
If anything goes wrong — ask for help!

---

@@ -54,20 +52,6 @@ If anything goes wrong — ask for help!

---

## For a consistent Kubernetes experience ...

- If you are using your own Kubernetes cluster, you can use [shpod](https://github.com/jpetazzo/shpod)

- `shpod` provides a shell running in a pod on your own cluster

- It comes with many tools pre-installed (helm, stern...)

- These tools are used in many exercises in these slides

- `shpod` also gives you completion and a fancy prompt
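A hypothetical way to start it — the manifest URL, namespace, and pod name below are assumptions; check the shpod README for the authoritative instructions:

```bash
# hypothetical invocation; the manifest URL may differ, see the shpod README
kubectl apply -f https://github.com/jpetazzo/shpod/raw/master/shpod.yaml
kubectl attach --namespace=shpod -it shpod
```
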
---

class: self-paced

## Get your own Docker nodes

@@ -50,32 +50,12 @@ Misattributed to Benjamin Franklin

- Go to @@SLIDES@@ to view these slides

<!--
- Join the chat room: @@CHAT@@
-->

<!-- ```open @@SLIDES@@``` -->

]

---

## Navigating slides

- Use arrows to move to next/previous slide

  (up, down, left, right, page up, page down)

- Type a slide number + ENTER to go to that slide

- The slide number is also visible in the URL bar

  (e.g. .../#123 for slide 123)

- Slides will remain online so you can review them later if needed

---

class: in-person

## Where are we going to run our containers?

@@ -26,7 +26,9 @@ fi

---

## Having a look at the application
## Downloading and running the application

Let's start this before we look around, as downloading will take a little time...

.exercise[

@@ -35,22 +37,20 @@ fi
  cd ~/container.training/dockercoins
  ```

- Check the files and directories:
- Use Compose to build and run all containers:
  ```bash
  tree
  docker-compose up
  ```

<!--
```longwait units of work done```
-->

]

---

## Viewing the application

- Jérôme is going to wear his developer hat ...

- ... start the application on his developer's machine ...

- ... and wait for the app to be up and running.
Compose tells Docker to build all container images (pulling
the corresponding base images), then starts all containers,
and displays aggregated logs.

---

@@ -165,6 +165,26 @@ https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/wo

---

class: extra-details

## Links, naming, and service discovery

- Containers can have network aliases (resolvable through DNS)

- Compose file version 2+ makes each container reachable through its service name

- Compose file version 1 required "links" sections to accomplish this

- Network aliases are automatically namespaced

  - you can have multiple apps declaring and using a service named `database`

  - containers in the blue app will resolve `database` to the IP of the blue database

  - containers in the green app will resolve `database` to the IP of the green database
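A minimal sketch of that name-based discovery (the service names and images are illustrative):

```bash
# hypothetical two-service stack: "worker" reaches "database" by service name
cat <<'EOF' > docker-compose.yml
version: "2"
services:
  database:
    image: redis
  worker:
    image: alpine
    command: ping database
EOF
docker-compose up   # the "worker" container resolves "database" via DNS
```
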
---

## Show me the code!

- You can check the GitHub repository with all the materials of this workshop:

@@ -190,6 +210,24 @@ https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/wo

---

class: extra-details

## Compose file format version

*This is relevant only if you have used Compose before 2016...*

- Compose 1.6 introduced support for a new Compose file format (aka "v2")

- Services are no longer at the top level, but under a `services` section

- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)

- Containers are placed on a dedicated network, making links unnecessary

- There are other minor differences, but the upgrade is easy and straightforward

---

## Our application at work

- On the left-hand side, the "rainbow strip" shows the container names

@@ -208,6 +246,18 @@ https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/wo

- The `webui` container exposes a web dashboard; let's view it

.exercise[

- With a web browser, connect to `node1` on port 8000

- Remember: the `nodeX` aliases are valid only on the nodes themselves

- In your browser, you need to enter the IP address of your node

<!-- ```open http://node1:8000``` -->

]

A drawing area should show up, and after a few seconds, a blue
graph will appear.

@@ -233,3 +283,74 @@ work on a local environment, or when using Docker Desktop for Mac or Windows.

How to fix this?

Stop the app with `^C`, edit `dockercoins.yml`, comment out the `volumes` section, and try again.

---

class: extra-details

## Why does the speed seem irregular?

- It *looks like* the speed is approximately 4 hashes/second

- Or more precisely: 4 hashes/second, with regular dips down to zero

- Why?

--

class: extra-details

- The app actually has a constant, steady speed: 3.33 hashes/second
  <br/>
  (which corresponds to 1 hash every 0.3 seconds, for *reasons*)

- Yes, and?

---

class: extra-details

## The reason why this graph is *not awesome*

- The worker doesn't update the counter after every loop, but up to once per second

- The speed is computed by the browser, checking the counter about once per second

- Between two consecutive updates, the counter will increase either by 4, or by 0

- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - 0 etc.

- What can we conclude from this?

--

class: extra-details

- "I'm clearly incapable of writing good frontend code!" 😀 — Jérôme

---

## Stopping the application

- If we interrupt Compose (with `^C`), it will politely ask the Docker Engine to stop the app

- The Docker Engine will send a `TERM` signal to the containers

- If the containers do not exit in a timely manner, the Engine sends a `KILL` signal

.exercise[

- Stop the application by hitting `^C`

<!--
```keys ^C```
-->

]

--

Some containers exit immediately, others take longer.

The containers that do not handle `SIGTERM` end up being killed after a 10s timeout. If we are very impatient, we can hit `^C` a second time!
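That grace period can be adjusted; for instance (the 60-second value is illustrative):

```bash
# give containers up to 60 seconds to exit cleanly before they get killed
docker-compose stop --timeout 60
```
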
@@ -9,3 +9,15 @@ class: title, in-person

That's all, folks! <br/> Questions?



---

class: fullpic



---

class: fullpic



@@ -11,12 +11,7 @@ class: title, in-person

@@TITLE@@<br/><br/>

.footnote[
<!--
**Be kind to the WiFi!**<br/>
*Don't use your hotspot.*<br/>
*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*<br/>
*Thank you!*
-->
**WiFi RedGuests5**

**Slides: @@SLIDES@@**
]

@@ -1,9 +0,0 @@
# NSX-T

*Connect and secure Kubernetes Pods*

- Distributed firewall and micro-segmentation for VMs and Pods

- Ingress and LoadBalancer Controller for Kubernetes

- Traceflow for Pods and dynamic routing
@@ -1,16 +0,0 @@
# PKS

*Automate and streamline Kubernetes cluster deployment and operations*

- Fully automated installation of mainstream Kubernetes

- Scale up, scale down & upgrade clusters

- Highly-available control plane & self-healing features

  (replace nodes automatically when needed and deploy CVE patches)

- Integration with VMware SDDC (Software Defined Data Center) features

  (e.g. vMotion, DRS, Shared Datastore, NSX-T, vRealize Suite)

@@ -1,12 +0,0 @@
# vRLI

*Centralize logs*

- Compatible with syslog

- Query language

- Dashboards

- High ingest capacity

@@ -1,11 +0,0 @@
# vROPS

*Manage Kubernetes and/or PKS clusters*

- Automatically add new PKS clusters after deployment

- Supervision

- Capacity management

- Global view of infrastructure
@@ -1,9 +0,0 @@
# vSAN

*Instantiate Stateful Pods*

- Compatible with CSI

- Distributed storage for higher fault tolerance + performance

- Available for Pods and VMs
@@ -112,6 +112,17 @@ div.pic img {
  max-width: 1210px;
  max-height: 550px;
}
div.fullpic p {
  margin: 0;
}
div.fullpic {
  padding: 0;
}
div.fullpic img {
  margin: auto;
  max-width: 100%;
  max-height: 100%;
}
div.pic h1, div.pic h2, div.title h1, div.title h2 {
  text-align: center;
}

@@ -28,7 +28,6 @@
var slideshow = remark.create({
  ratio: '16:9',
  highlightSpans: true,
  slideNumberFormat: '@@SLIDENUMBERPREFIX@@%current%/%total%',
  excludedClasses: [@@EXCLUDE@@]
});
</script>