mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-05-03 23:46:40 +00:00
Compare commits
51 Commits
kube-2019-
...
kube-2019-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fad70712ed | ||
|
|
703a438c09 | ||
|
|
12e1da32fb | ||
|
|
a61b69ad9a | ||
|
|
3388db4272 | ||
|
|
d2d901302f | ||
|
|
1fae4253bc | ||
|
|
f7f5ab1304 | ||
|
|
2a747316c6 | ||
|
|
5b1d75c19d | ||
|
|
7addacef22 | ||
|
|
0136391ab5 | ||
|
|
ed27ad1d1e | ||
|
|
c15aa708df | ||
|
|
285add8f0a | ||
|
|
ac955fa42a | ||
|
|
5749348883 | ||
|
|
04cd0320ac | ||
|
|
bc885f3dca | ||
|
|
bbe35a3901 | ||
|
|
eb17b4c628 | ||
|
|
a4d50a5439 | ||
|
|
98d2b79c97 | ||
|
|
8320534a5c | ||
|
|
74ece65947 | ||
|
|
7444f8d71e | ||
|
|
c9bc417a32 | ||
|
|
7d4331477a | ||
|
|
ff132fd728 | ||
|
|
4ec7b1d7f4 | ||
|
|
e08e7848ed | ||
|
|
be6afa3e5e | ||
|
|
c340d909de | ||
|
|
b667cf7cfc | ||
|
|
e04998e9cd | ||
|
|
84198b3fdc | ||
|
|
5c161d2090 | ||
|
|
0fc7c2316c | ||
|
|
fb64c0d68f | ||
|
|
23aaf7f58c | ||
|
|
6cbcc4ae69 | ||
|
|
0b80238736 | ||
|
|
4c285b5318 | ||
|
|
2095a15728 | ||
|
|
13ba8cef9d | ||
|
|
be2374c672 | ||
|
|
f96da2d260 | ||
|
|
5958874071 | ||
|
|
370bdf9aaf | ||
|
|
381cd27037 | ||
|
|
c409c6997a |
160
k8s/dockercoins.yaml
Normal file
160
k8s/dockercoins.yaml
Normal file
@@ -0,0 +1,160 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
name: hasher
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: hasher
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/hasher:v0.1
|
||||
name: hasher
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: hasher
|
||||
name: hasher
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: hasher
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- image: redis
|
||||
name: redis
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
name: redis
|
||||
spec:
|
||||
ports:
|
||||
- port: 6379
|
||||
protocol: TCP
|
||||
targetPort: 6379
|
||||
selector:
|
||||
app: redis
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
name: rng
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rng
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/rng:v0.1
|
||||
name: rng
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: rng
|
||||
name: rng
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: rng
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
name: webui
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webui
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/webui:v0.1
|
||||
name: webui
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: webui
|
||||
name: webui
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: webui
|
||||
type: NodePort
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app: worker
|
||||
name: worker
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: worker
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: worker
|
||||
spec:
|
||||
containers:
|
||||
- image: dockercoins/worker:v0.1
|
||||
name: worker
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
name: haproxy
|
||||
containers:
|
||||
- name: haproxy
|
||||
image: haproxy
|
||||
image: haproxy:1
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /usr/local/etc/haproxy/
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: kibana
|
||||
name: whatever
|
||||
spec:
|
||||
rules:
|
||||
- host: kibana.185.145.251.54.nip.io
|
||||
- host: whatever.A.B.C.D.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: kibana
|
||||
servicePort: 5601
|
||||
serviceName: whatever
|
||||
servicePort: 1234
|
||||
|
||||
8
k8s/nginx-1-without-volume.yaml
Normal file
8
k8s/nginx-1-without-volume.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-without-volume
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
13
k8s/nginx-2-with-volume.yaml
Normal file
13
k8s/nginx-2-with-volume.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-volume
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-volume
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
20
k8s/nginx-4-with-init.yaml
Normal file
20
k8s/nginx-4-with-init.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-init
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
@@ -7,9 +7,9 @@ workshop.
|
||||
|
||||
|
||||
## 1. Prerequisites
|
||||
|
||||
Virtualbox, Vagrant and Ansible
|
||||
|
||||
|
||||
- Virtualbox: https://www.virtualbox.org/wiki/Downloads
|
||||
|
||||
- Vagrant: https://www.vagrantup.com/downloads.html
|
||||
@@ -25,7 +25,7 @@ Virtualbox, Vagrant and Ansible
|
||||
|
||||
$ git clone --recursive https://github.com/ansible/ansible.git
|
||||
$ cd ansible
|
||||
$ git checkout stable-2.0.0.1
|
||||
$ git checkout stable-{{ getStableVersionFromAnsibleProject }}
|
||||
$ git submodule update
|
||||
|
||||
- source the setup script to make Ansible available on this terminal session:
|
||||
@@ -38,6 +38,7 @@ Virtualbox, Vagrant and Ansible
|
||||
|
||||
|
||||
## 2. Preparing the environment
|
||||
Change into directory that has your Vagrantfile
|
||||
|
||||
Run the following commands:
|
||||
|
||||
@@ -66,6 +67,14 @@ will reflect inside the instance.
|
||||
|
||||
- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed
|
||||
|
||||
- If you get an error like "no Vagrant file found" or you have a file but "cannot open base box" when running `vagrant up`,
|
||||
chances are good you not in the correct directory.
|
||||
Make sure you are in sub directory named "prepare-local". It has all the config files required by ansible, vagrant and virtualbox
|
||||
|
||||
- If you are using Python 3.7, running the ansible-playbook provisioning, see an error like "SyntaxError: invalid syntax" and it mentions
|
||||
the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
|
||||
https://github.com/ansible/ansible/issues/42105
|
||||
|
||||
- If you get strange Ansible errors about dependencies, try to check your pip
|
||||
version with `pip --version`. The current version is 8.1.1. If your pip is
|
||||
older than this, upgrade it with `sudo pip install --upgrade pip`, restart
|
||||
|
||||
@@ -10,15 +10,21 @@ These tools can help you to create VMs on:
|
||||
|
||||
- [Docker](https://docs.docker.com/engine/installation/)
|
||||
- [Docker Compose](https://docs.docker.com/compose/install/)
|
||||
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
|
||||
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
|
||||
|
||||
Depending on the infrastructure that you want to use, you also need to install
|
||||
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
|
||||
|
||||
And if you want to generate printable cards:
|
||||
|
||||
- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
|
||||
- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)
|
||||
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
|
||||
- [jinja2](https://pypi.python.org/pypi/Jinja2)
|
||||
|
||||
You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).
|
||||
|
||||
These require Python 3. If you are on a Mac, see below for specific instructions on setting up
|
||||
Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
|
||||
may have changed your default Python to Python 2.
|
||||
|
||||
## General Workflow
|
||||
|
||||
@@ -256,3 +262,32 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
|
||||
|
||||
- Don't write to bash history in system() in postprep
|
||||
- compose, etc version inconsistent (int vs str)
|
||||
|
||||
## Making sure Python3 is the default (Mac only)
|
||||
|
||||
Check the `/usr/local/bin/python` symlink. It should be pointing to
|
||||
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
|
||||
instructions.
|
||||
|
||||
1) Verify that Python 3 is installed.
|
||||
|
||||
```
|
||||
ls -la /usr/local/Cellar/Python
|
||||
```
|
||||
|
||||
You should see one or more versions of Python 3. If you don't,
|
||||
install it with `brew install python`.
|
||||
|
||||
2) Verify that `python` points to Python3.
|
||||
|
||||
```
|
||||
ls -la /usr/local/bin/python
|
||||
```
|
||||
|
||||
If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.
|
||||
|
||||
```
|
||||
rm /usr/local/bin/python
|
||||
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
|
||||
# where xxxx is the most recent Python 3 version you saw above
|
||||
```
|
||||
|
||||
@@ -127,11 +127,11 @@ _cmd_kubebins() {
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.15/etcd-v3.3.15-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
|
||||
curl -L https://dl.k8s.io/v1.16.2/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
|
||||
fi
|
||||
if ! [ -x kubelet ]; then
|
||||
@@ -143,7 +143,7 @@ _cmd_kubebins() {
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
|
||||
@@ -106,6 +106,7 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
|
||||
system("sudo service ssh restart")
|
||||
system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install git jq")
|
||||
system("sudo apt-get -qy install emacs-nox joe")
|
||||
|
||||
#######################
|
||||
### DOCKER INSTALLS ###
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Uncomment and/or edit one of the the following lines if necessary.
|
||||
#/ /kube-halfday.yml.html 200
|
||||
#/ /kube-fullday.yml.html 200
|
||||
/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
|
||||
@@ -104,22 +104,6 @@ like Windows, macOS, Solaris, FreeBSD ...
|
||||
|
||||
---
|
||||
|
||||
## rkt
|
||||
|
||||
* Compares to `runc`.
|
||||
|
||||
* No daemon or API.
|
||||
|
||||
* Strong emphasis on security (through privilege separation).
|
||||
|
||||
* Networking has to be set up separately (e.g. through CNI plugins).
|
||||
|
||||
* Partial image management (pull, but no push).
|
||||
|
||||
(Image build is handled by separate tools.)
|
||||
|
||||
---
|
||||
|
||||
## CRI-O
|
||||
|
||||
* Designed to be used with Kubernetes as a simple, basic runtime.
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
speaker: jpetazzo
|
||||
title: Deploying and scaling applications with Kubernetes
|
||||
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
|
||||
slides: https://velocity-2019-11.container.training/
|
||||
|
||||
- date: 2019-11-13
|
||||
country: fr
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
#- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- - containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- - containers/Docker_Machine.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
#- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -1,63 +0,0 @@
|
||||
title: |
|
||||
Introduction
|
||||
to Containers
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
@@ -667,17 +667,12 @@ class: extra-details
|
||||
|
||||
- For auditing purposes, sometimes we want to know who can perform an action
|
||||
|
||||
- There is a proof-of-concept tool by Aqua Security which does exactly that:
|
||||
- There are a few tools to help us with that
|
||||
|
||||
https://github.com/aquasecurity/kubectl-who-can
|
||||
- [kubectl-who-can](https://github.com/aquasecurity/kubectl-who-can) by Aqua Security
|
||||
|
||||
- This is one way to install it:
|
||||
```bash
|
||||
docker run --rm -v /usr/local/bin:/go/bin golang \
|
||||
go get -v github.com/aquasecurity/kubectl-who-can
|
||||
```
|
||||
- [Review Access (aka Rakkess)](https://github.com/corneliusweig/rakkess)
|
||||
|
||||
- This is one way to use it:
|
||||
```bash
|
||||
kubectl-who-can create pods
|
||||
```
|
||||
- Both are available as standalone programs, or as plugins for `kubectl`
|
||||
|
||||
(`kubectl` plugins can be installed and managed with `krew`)
|
||||
|
||||
@@ -15,26 +15,3 @@
|
||||
- `dockercoins/webui:v0.1`
|
||||
|
||||
- `dockercoins/worker:v0.1`
|
||||
|
||||
---
|
||||
|
||||
## Setting `$REGISTRY` and `$TAG`
|
||||
|
||||
- In the upcoming exercises and labs, we use a couple of environment variables:
|
||||
|
||||
- `$REGISTRY` as a prefix to all image names
|
||||
|
||||
- `$TAG` as the image version tag
|
||||
|
||||
- For example, the worker image is `$REGISTRY/worker:$TAG`
|
||||
|
||||
- If you copy-paste the commands in these exercises:
|
||||
|
||||
**make sure that you set `$REGISTRY` and `$TAG` first!**
|
||||
|
||||
- For example:
|
||||
```
|
||||
export REGISTRY=dockercoins TAG=v0.1
|
||||
```
|
||||
|
||||
(this will expand `$REGISTRY/worker:$TAG` to `dockercoins/worker:v0.1`)
|
||||
|
||||
@@ -44,21 +44,37 @@
|
||||
|
||||
## Other things that Kubernetes can do for us
|
||||
|
||||
- Basic autoscaling
|
||||
- Autoscaling
|
||||
|
||||
- Blue/green deployment, canary deployment
|
||||
(straightforward on CPU; more complex on other metrics)
|
||||
|
||||
- Long running services, but also batch (one-off) jobs
|
||||
- Ressource management and scheduling
|
||||
|
||||
- Overcommit our cluster and *evict* low-priority jobs
|
||||
(reserve CPU/RAM for containers; placement constraints)
|
||||
|
||||
- Run services with *stateful* data (databases etc.)
|
||||
- Advanced rollout patterns
|
||||
|
||||
- Fine-grained access control defining *what* can be done by *whom* on *which* resources
|
||||
(blue/green deployment, canary deployment)
|
||||
|
||||
- Integrating third party services (*service catalog*)
|
||||
---
|
||||
|
||||
- Automating complex tasks (*operators*)
|
||||
## More things that Kubernetes can do for us
|
||||
|
||||
- Batch jobs
|
||||
|
||||
(one-off; parallel; also cron-style periodic execution)
|
||||
|
||||
- Fine-grained access control
|
||||
|
||||
(defining *what* can be done by *whom* on *which* resources)
|
||||
|
||||
- Stateful services
|
||||
|
||||
(databases, message queues, etc.)
|
||||
|
||||
- Automating complex tasks with *operators*
|
||||
|
||||
(e.g. database replication, failover, etc.)
|
||||
|
||||
---
|
||||
|
||||
@@ -191,11 +207,29 @@ No!
|
||||
|
||||
- By default, Kubernetes uses the Docker Engine to run containers
|
||||
|
||||
- We could also use `rkt` ("Rocket") from CoreOS
|
||||
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
|
||||
|
||||
- Or leverage other pluggable runtimes through the *Container Runtime Interface*
|
||||
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
|
||||
|
||||
(like CRI-O, or containerd)
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Some runtimes available through CRI
|
||||
|
||||
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
|
||||
|
||||
- maintained by Docker, IBM, and community
|
||||
- used by Docker Engine, microk8s, k3s, GKE; also standalone
|
||||
- comes with its own CLI, `ctr`
|
||||
|
||||
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
|
||||
|
||||
- maintained by Red Hat, SUSE, and community
|
||||
- used by OpenShift and Kubic
|
||||
- designed specifically as a minimal runtime for Kubernetes
|
||||
|
||||
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -193,7 +193,12 @@
|
||||
|
||||
- Best practice: set a memory limit, and pass it to the runtime
|
||||
|
||||
(see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)
|
||||
- Note: recent versions of the JVM can do this automatically
|
||||
|
||||
(see [JDK-8146115](https://bugs.java.com/bugdatabase/view_bug.do?bug_id=JDK-8146115))
|
||||
and
|
||||
[this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/)
|
||||
for detailed examples)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -105,6 +105,22 @@ The dashboard will then ask you which authentication you want to use.
|
||||
|
||||
---
|
||||
|
||||
## Other dashboards
|
||||
|
||||
- [Kube Web View](https://codeberg.org/hjacobs/kube-web-view)
|
||||
|
||||
- read-only dashboard
|
||||
|
||||
- optimized for "troubleshooting and incident response"
|
||||
|
||||
- see [vision and goals](https://kube-web-view.readthedocs.io/en/latest/vision.html#vision) for details
|
||||
|
||||
- [Kube Ops View](https://github.com/hjacobs/kube-ops-view)
|
||||
|
||||
- "provides a common operational picture for multiple Kubernetes clusters"
|
||||
|
||||
---
|
||||
|
||||
# Security implications of `kubectl apply`
|
||||
|
||||
- When we do `kubectl apply -f <URL>`, we create arbitrary resources
|
||||
@@ -156,4 +172,3 @@ The dashboard will then ask you which authentication you want to use.
|
||||
- It introduces new failure modes
|
||||
|
||||
(for instance, if you try to apply YAML from a link that's no longer valid)
|
||||
|
||||
|
||||
@@ -481,13 +481,13 @@ docker run alpine echo hello world
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the file `kubeconfig.kubelet` with `kubectl`:
|
||||
- Create the file `~/.kube/config` with `kubectl`:
|
||||
```bash
|
||||
kubectl --kubeconfig kubeconfig.kubelet config \
|
||||
kubectl config \
|
||||
set-cluster localhost --server http://localhost:8080
|
||||
kubectl --kubeconfig kubeconfig.kubelet config \
|
||||
kubectl config \
|
||||
set-context localhost --cluster localhost
|
||||
kubectl --kubeconfig kubeconfig.kubelet config \
|
||||
kubectl config \
|
||||
use-context localhost
|
||||
```
|
||||
|
||||
@@ -495,19 +495,7 @@ docker run alpine echo hello world
|
||||
|
||||
---
|
||||
|
||||
## All Kubernetes clients can use `kubeconfig`
|
||||
|
||||
- The `kubeconfig.kubelet` file has the same format as e.g. `~/.kubeconfig`
|
||||
|
||||
- All Kubernetes clients can use a similar file
|
||||
|
||||
- The `kubectl config` commands can be used to manipulate these files
|
||||
|
||||
- This highlights that kubelet is a "normal" client of the API server
|
||||
|
||||
---
|
||||
|
||||
## Our `kubeconfig.kubelet` file
|
||||
## Our `~/.kube/config` file
|
||||
|
||||
The file that we generated looks like the one below.
|
||||
|
||||
@@ -533,9 +521,9 @@ clusters:
|
||||
|
||||
.exercise[
|
||||
|
||||
- Start kubelet with that `kubeconfig.kubelet` file:
|
||||
- Start kubelet with that kubeconfig file:
|
||||
```bash
|
||||
kubelet --kubeconfig kubeconfig.kubelet
|
||||
kubelet --kubeconfig ~/.kube/config
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
209
slides/k8s/dryrun.md
Normal file
209
slides/k8s/dryrun.md
Normal file
@@ -0,0 +1,209 @@
|
||||
# Authoring YAML
|
||||
|
||||
- There are various ways to generate YAML with Kubernetes, e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
- `kubectl create deployment` (and a few other `kubectl create` variants)
|
||||
|
||||
- `kubectl expose`
|
||||
|
||||
- When and why do we need to write our own YAML?
|
||||
|
||||
- How do we write YAML from scratch?
|
||||
|
||||
---
|
||||
|
||||
## The limits of generated YAML
|
||||
|
||||
- Many advanced (and even not-so-advanced) features require to write YAML:
|
||||
|
||||
- pods with multiple containers
|
||||
|
||||
- resource limits
|
||||
|
||||
- healthchecks
|
||||
|
||||
- DaemonSets, StatefulSets
|
||||
|
||||
- and more!
|
||||
|
||||
- How do we access these features?
|
||||
|
||||
---
|
||||
|
||||
## We don't have to start from scratch
|
||||
|
||||
- Create a resource (e.g. Deployment)
|
||||
|
||||
- Dump its YAML with `kubectl get -o yaml ...`
|
||||
|
||||
- Edit the YAML
|
||||
|
||||
- Use `kubectl apply -f ...` with the YAML file to:
|
||||
|
||||
- update the resource (if it's the same kind)
|
||||
|
||||
- create a new resource (if it's a different kind)
|
||||
|
||||
- Or: Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run` option
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the YAML for a Deployment without creating it:
|
||||
```bash
|
||||
kubectl create deployment web --image nginx --dry-run
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- We can clean up that YAML even more if we want
|
||||
|
||||
(for instance, we can remove the `creationTimestamp` and empty dicts)
|
||||
|
||||
---
|
||||
|
||||
## Using `--dry-run` with `kubectl apply`
|
||||
|
||||
- The `--dry-run` option can also be used with `kubectl apply`
|
||||
|
||||
- However, it can be misleading (it doesn't do a "real" dry run)
|
||||
|
||||
- Let's see what happens in the following scenario:
|
||||
|
||||
- generate the YAML for a Deployment
|
||||
|
||||
- tweak the YAML to transform it into a DaemonSet
|
||||
|
||||
- apply that YAML to see what would actually be created
|
||||
|
||||
---
|
||||
|
||||
## The limits of `kubectl apply --dry-run`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Generate the YAML for a deployment:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx -o yaml > web.yaml
|
||||
```
|
||||
|
||||
- Change the `kind` in the YAML to make it a `DaemonSet`:
|
||||
```bash
|
||||
sed -i s/Deployment/DaemonSet/ web.yaml
|
||||
```
|
||||
|
||||
- Ask `kubectl` what would be applied:
|
||||
```bash
|
||||
kubectl apply -f web.yaml --dry-run --validate=false -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The resulting YAML doesn't represent a valid DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Since Kubernetes 1.13, we can use [server-side dry run and diffs](https://kubernetes.io/blog/2019/01/14/apiserver-dry-run-and-kubectl-diff/)
|
||||
|
||||
- Server-side dry run will do all the work, but *not* persist to etcd
|
||||
|
||||
(all validation and mutation hooks will be executed)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try the same YAML file as earlier, with server-side dry run:
|
||||
```bash
|
||||
kubectl apply -f web.yaml --server-dry-run --validate=false -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The resulting YAML doesn't have the `replicas` field anymore.
|
||||
|
||||
Instead, it has the fields expected in a DaemonSet.
|
||||
|
||||
---
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
|
||||
- The only step that is skipped is "write to etcd"
|
||||
|
||||
- YAML that passes server-side dry run *should* apply successfully
|
||||
|
||||
(unless the cluster state changes by the time the YAML is actually applied)
|
||||
|
||||
- Validating or mutating hooks that have side effects can also be an issue
|
||||
|
||||
---
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- Kubernetes 1.13 also introduced `kubectl diff`
|
||||
|
||||
- `kubectl diff` does a server-side dry run, *and* shows differences
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try `kubectl diff` on the YAML that we tweaked earlier:
|
||||
```bash
|
||||
kubectl diff -f web.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: we don't need to specify `--validate=false` here.
|
||||
|
||||
---
|
||||
|
||||
## Advantage of YAML
|
||||
|
||||
- Using YAML (instead of `kubectl run`/`create`/etc.) allows to be *declarative*
|
||||
|
||||
- The YAML describes the desired state of our cluster and applications
|
||||
|
||||
- YAML can be stored, versioned, archived (e.g. in git repositories)
|
||||
|
||||
- To change resources, change the YAML files
|
||||
|
||||
(instead of using `kubectl edit`/`scale`/`label`/etc.)
|
||||
|
||||
- Changes can be reviewed before being applied
|
||||
|
||||
(with code reviews, pull requests ...)
|
||||
|
||||
- This workflow is sometimes called "GitOps"
|
||||
|
||||
(there are tools like Weave Flux or GitKube to facilitate it)
|
||||
|
||||
---
|
||||
|
||||
## YAML in practice
|
||||
|
||||
- Get started with `kubectl run`/`create`/`expose`/etc.
|
||||
|
||||
- Dump the YAML with `kubectl get -o yaml`
|
||||
|
||||
- Tweak that YAML and `kubectl apply` it back
|
||||
|
||||
- Store that YAML for reference (for further deployments)
|
||||
|
||||
- Feel free to clean up the YAML:
|
||||
|
||||
- remove fields you don't know
|
||||
|
||||
- check that it still works!
|
||||
|
||||
- That YAML will be useful later when using e.g. Kustomize or Helm
|
||||
@@ -1,41 +1,3 @@
|
||||
## Questions to ask before adding healthchecks
|
||||
|
||||
- Do we want liveness, readiness, both?
|
||||
|
||||
(sometimes, we can use the same check, but with different failure thresholds)
|
||||
|
||||
- Do we have existing HTTP endpoints that we can use?
|
||||
|
||||
- Do we need to add new endpoints, or perhaps use something else?
|
||||
|
||||
- Are our healthchecks likely to use resources and/or slow down the app?
|
||||
|
||||
- Do they depend on additional services?
|
||||
|
||||
(this can be particularly tricky, see next slide)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks and dependencies
|
||||
|
||||
- A good healthcheck should always indicate the health of the service itself
|
||||
|
||||
- It should not be affected by the state of the service's dependencies
|
||||
|
||||
- Example: a web server requiring a database connection to operate
|
||||
|
||||
(make sure that the healthcheck can report "OK" even if the database is down;
|
||||
<br/>
|
||||
because it won't help us to restart the web server if the issue is with the DB!)
|
||||
|
||||
- Example: a microservice calling other microservices
|
||||
|
||||
- Example: a worker process
|
||||
|
||||
(these will generally require minor code changes to report health)
|
||||
|
||||
---
|
||||
|
||||
## Adding healthchecks to an app
|
||||
|
||||
- Let's add healthchecks to DockerCoins!
|
||||
@@ -370,24 +332,4 @@ class: extra-details
|
||||
|
||||
(and have gcr.io/pause take care of the reaping)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks for worker
|
||||
|
||||
- Readiness isn't useful
|
||||
|
||||
(because worker isn't a backend for a service)
|
||||
|
||||
- Liveness may help us restart a broken worker, but how can we check it?
|
||||
|
||||
- Embedding an HTTP server is an option
|
||||
|
||||
(but it has a high potential for unwanted side effects and false positives)
|
||||
|
||||
- Using a "lease" file can be relatively easy:
|
||||
|
||||
- touch a file during each iteration of the main loop
|
||||
|
||||
- check the timestamp of that file from an exec probe
|
||||
|
||||
- Writing logs (and checking them from the probe) also works
|
||||
- Discussion of this in [Video - 10 Ways to Shoot Yourself in the Foot with Kubernetes, #9 Will Surprise You](https://www.youtube.com/watch?v=QKI-JRs2RIE)
|
||||
|
||||
@@ -42,9 +42,11 @@
|
||||
|
||||
- internal corruption (causing all requests to error)
|
||||
|
||||
- If the liveness probe fails *N* consecutive times, the container is killed
|
||||
- Anything where our incident response would be "just restart/reboot it"
|
||||
|
||||
- *N* is the `failureThreshold` (3 by default)
|
||||
.warning[**Do not** use liveness probes for problems that can't be fixed by a restart]
|
||||
|
||||
- Otherwise we just restart our pods for no reason, creating useless load
|
||||
|
||||
---
|
||||
|
||||
@@ -52,7 +54,7 @@
|
||||
|
||||
- Indicates if the container is ready to serve traffic
|
||||
|
||||
- If a container becomes "unready" (let's say busy!) it might be ready again soon
|
||||
- If a container becomes "unready" it might be ready again soon
|
||||
|
||||
- If the readiness probe fails:
|
||||
|
||||
@@ -66,19 +68,79 @@
|
||||
|
||||
## When to use a readiness probe
|
||||
|
||||
- To indicate temporary failures
|
||||
- To indicate failure due to an external cause
|
||||
|
||||
- the application can only service *N* parallel connections
|
||||
- database is down or unreachable
|
||||
|
||||
- the runtime is busy doing garbage collection or initial data load
|
||||
- mandatory auth or other backend service unavailable
|
||||
|
||||
- The container is marked as "not ready" after `failureThreshold` failed attempts
|
||||
- To indicate temporary failure or unavailability
|
||||
|
||||
(3 by default)
|
||||
- application can only service *N* parallel connections
|
||||
|
||||
- It is marked again as "ready" after `successThreshold` successful attempts
|
||||
- runtime is busy doing garbage collection or initial data load
|
||||
|
||||
(1 by default)
|
||||
- For processes that take a long time to start
|
||||
|
||||
(more on that later)
|
||||
|
||||
---
|
||||
|
||||
## Dependencies
|
||||
|
||||
- If a web server depends on a database to function, and the database is down:
|
||||
|
||||
- the web server's liveness probe should succeed
|
||||
|
||||
- the web server's readiness probe should fail
|
||||
|
||||
- Same thing for any hard dependency (without which the container can't work)
|
||||
|
||||
.warning[**Do not** fail liveness probes for problems that are external to the container]
|
||||
|
||||
---
|
||||
|
||||
## Timing and thresholds
|
||||
|
||||
- Probes are executed at intervals of `periodSeconds` (default: 10)
|
||||
|
||||
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
|
||||
|
||||
.warning[If a probe takes longer than that, it is considered as a FAIL]
|
||||
|
||||
- A probe is considered successful after `successThreshold` successes (default: 1)
|
||||
|
||||
- A probe is considered failing after `failureThreshold` failures (default: 3)
|
||||
|
||||
- A probe can have an `initialDelaySeconds` parameter (default: 0)
|
||||
|
||||
- Kubernetes will wait that amount of time before running the probe for the first time
|
||||
|
||||
(this is important to avoid killing services that take a long time to start)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Startup probe
|
||||
|
||||
- Kubernetes 1.16 introduces a third type of probe: `startupProbe`
|
||||
|
||||
(it is in `alpha` in Kubernetes 1.16)
|
||||
|
||||
- It can be used to indicate "container not ready *yet*"
|
||||
|
||||
- process is still starting
|
||||
|
||||
- loading external data, priming caches
|
||||
|
||||
- Before Kubernetes 1.16, we had to use the `initialDelaySeconds` parameter
|
||||
|
||||
(available for both liveness and readiness probes)
|
||||
|
||||
- `initialDelaySeconds` is a rigid delay (always wait X before running probes)
|
||||
|
||||
- `startupProbe` works better when a container start time can vary a lot
|
||||
|
||||
---
|
||||
|
||||
@@ -112,10 +174,12 @@
|
||||
|
||||
(instead of serving errors or timeouts)
|
||||
|
||||
- Overloaded backends get removed from load balancer rotation
|
||||
- Unavailable backends get removed from load balancer rotation
|
||||
|
||||
(thus improving response times across the board)
|
||||
|
||||
- If a probe is not defined, it's as if there was an "always successful" probe
|
||||
|
||||
---
|
||||
|
||||
## Example: HTTP probe
|
||||
@@ -165,14 +229,56 @@ If the Redis process becomes unresponsive, it will be killed.
|
||||
|
||||
---
|
||||
|
||||
## Details about liveness and readiness probes
|
||||
## Questions to ask before adding healthchecks
|
||||
|
||||
- Probes are executed at intervals of `periodSeconds` (default: 10)
|
||||
- Do we want liveness, readiness, both?
|
||||
|
||||
- The timeout for a probe is set with `timeoutSeconds` (default: 1)
|
||||
(sometimes, we can use the same check, but with different failure thresholds)
|
||||
|
||||
- A probe is considered successful after `successThreshold` successes (default: 1)
|
||||
- Do we have existing HTTP endpoints that we can use?
|
||||
|
||||
- A probe is considered failing after `failureThreshold` failures (default: 3)
|
||||
- Do we need to add new endpoints, or perhaps use something else?
|
||||
|
||||
- If a probe is not defined, it's as if there was an "always successful" probe
|
||||
- Are our healthchecks likely to use resources and/or slow down the app?
|
||||
|
||||
- Do they depend on additional services?
|
||||
|
||||
(this can be particularly tricky, see next slide)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks and dependencies
|
||||
|
||||
- Liveness checks should not be influenced by the state of external services
|
||||
|
||||
- All checks should reply quickly (by default, less than 1 second)
|
||||
|
||||
- Otherwise, they are considered to fail
|
||||
|
||||
- This might require to check the health of dependencies asynchronously
|
||||
|
||||
(e.g. if a database or API might be healthy but still take more than
|
||||
1 second to reply, we should check the status asynchronously and report
|
||||
a cached status)
|
||||
|
||||
---
|
||||
|
||||
## Healthchecks for workers
|
||||
|
||||
(In that context, worker = process that doesn't accept connections)
|
||||
|
||||
- Readiness isn't useful
|
||||
|
||||
(because workers aren't backends for a service)
|
||||
|
||||
- Liveness may help us restart a broken worker, but how can we check it?
|
||||
|
||||
- Embedding an HTTP server is a (potentially expensive) option
|
||||
|
||||
- Using a "lease" file can be relatively easy:
|
||||
|
||||
- touch a file during each iteration of the main loop
|
||||
|
||||
- check the timestamp of that file from an exec probe
|
||||
|
||||
- Writing logs (and checking them from the probe) also works
|
||||
|
||||
@@ -153,10 +153,7 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait seq=3```
|
||||
```keys ^C```
|
||||
-->
|
||||
- Leave that command running, so that we can keep an eye on these logs
|
||||
|
||||
]
|
||||
|
||||
@@ -186,6 +183,44 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
## Log streaming
|
||||
|
||||
- Let's look again at the output of `kubectl logs`
|
||||
|
||||
(the one we started before scaling up)
|
||||
|
||||
- `kubectl logs` shows us one line per second
|
||||
|
||||
- We could expect 3 lines per second
|
||||
|
||||
(since we should now have 3 pods running `ping`)
|
||||
|
||||
- Let's try to figure out what's happening!
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs of multiple pods
|
||||
|
||||
- What happens if we restart `kubectl logs`?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Interrupt `kubectl logs` (with Ctrl-C)
|
||||
|
||||
- Restart it:
|
||||
```bash
|
||||
kubectl logs deploy/pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
|
||||
|
||||
Let's leave `kubectl logs` running while we keep exploring.
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Resilience
|
||||
|
||||
- The *deployment* `pingpong` watches its *replica set*
|
||||
@@ -196,20 +231,12 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
.exercise[
|
||||
|
||||
- In a separate window, list pods, and keep watching them:
|
||||
- In a separate window, watch the list of pods:
|
||||
```bash
|
||||
kubectl get pods -w
|
||||
watch kubectl get pods
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Running```
|
||||
```keys ^C```
|
||||
```hide kubectl wait deploy pingpong --for condition=available```
|
||||
```keys kubectl delete pod ping```
|
||||
```copypaste pong-..........-.....```
|
||||
-->
|
||||
|
||||
- Destroy a pod:
|
||||
- Destroy the pod currently shown by `kubectl logs`:
|
||||
```
|
||||
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
|
||||
```
|
||||
@@ -217,6 +244,23 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
## What happened?
|
||||
|
||||
- `kubectl delete pod` terminates the pod gracefully
|
||||
|
||||
(sending it the TERM signal and waiting for it to shutdown)
|
||||
|
||||
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
|
||||
|
||||
- But we can still see the output of the "Terminating" pod in `kubectl logs`
|
||||
|
||||
- Until 30 seconds later, when the grace period expires
|
||||
|
||||
- The pod is then killed, and `kubectl logs` exits
|
||||
|
||||
---
|
||||
|
||||
|
||||
## What if we wanted something different?
|
||||
|
||||
- What if we wanted to start a "one-shot" container that *doesn't* get restarted?
|
||||
@@ -234,6 +278,72 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
## Scheduling periodic background work
|
||||
|
||||
- A Cron Job is a job that will be executed at specific intervals
|
||||
|
||||
(the name comes from the traditional cronjobs executed by the UNIX crond)
|
||||
|
||||
- It requires a *schedule*, represented as five space-separated fields:
|
||||
|
||||
- minute [0,59]
|
||||
- hour [0,23]
|
||||
- day of the month [1,31]
|
||||
- month of the year [1,12]
|
||||
- day of the week ([0,6] with 0=Sunday)
|
||||
|
||||
- `*` means "all valid values"; `/N` means "every N"
|
||||
|
||||
- Example: `*/3 * * * *` means "every three minutes"
|
||||
|
||||
---
|
||||
|
||||
## Creating a Cron Job
|
||||
|
||||
- Let's create a simple job to be executed every three minutes
|
||||
|
||||
- Cron Jobs need to terminate, otherwise they'd run forever
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the Cron Job:
|
||||
```bash
|
||||
kubectl run --schedule="*/3 * * * *" --restart=OnFailure --image=alpine sleep 10
|
||||
```
|
||||
|
||||
- Check the resource that was created:
|
||||
```bash
|
||||
kubectl get cronjobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cron Jobs in action
|
||||
|
||||
- At the specified schedule, the Cron Job will create a Job
|
||||
|
||||
- The Job will create a Pod
|
||||
|
||||
- The Job will make sure that the Pod completes
|
||||
|
||||
(re-creating another one if it fails, for instance if its node fails)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the Jobs that are created:
|
||||
```bash
|
||||
kubectl get jobs
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(It will take a few minutes before the first job is scheduled.)
|
||||
|
||||
---
|
||||
|
||||
|
||||
## What about that deprecation warning?
|
||||
|
||||
- As we can see from the previous slide, `kubectl run` can do many things
|
||||
|
||||
@@ -66,6 +66,8 @@ Exactly what we need!
|
||||
sudo chmod +x /usr/local/bin/stern
|
||||
```
|
||||
|
||||
- On OS X, just `brew install stern`
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
---
|
||||
|
||||
@@ -218,6 +218,18 @@ class: extra-details
|
||||
|
||||
## What's going on?
|
||||
|
||||
- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
|
||||
|
||||
- It lets the container engine use a default network
|
||||
|
||||
(in that case, we end up with the default Docker bridge)
|
||||
|
||||
- Our pods are running on independent, disconnected, host-local networks
|
||||
|
||||
---
|
||||
|
||||
## What do we need to do?
|
||||
|
||||
- On a normal cluster, kubelet is configured to set up pod networking with CNI plugins
|
||||
|
||||
- This requires:
|
||||
@@ -228,14 +240,6 @@ class: extra-details
|
||||
|
||||
- running kubelet with `--network-plugin=cni`
|
||||
|
||||
- Without the `--network-plugin` flag, kubelet defaults to "no-op" networking
|
||||
|
||||
- It lets the container engine use a default network
|
||||
|
||||
(in that case, we end up with the default Docker bridge)
|
||||
|
||||
- Our pods are running on independent, disconnected, host-local networks
|
||||
|
||||
---
|
||||
|
||||
## Using network plugins
|
||||
@@ -394,7 +398,7 @@ class: extra-details
|
||||
|
||||
- Start kube-proxy:
|
||||
```bash
|
||||
sudo kube-proxy --kubeconfig ~/kubeconfig
|
||||
sudo kube-proxy --kubeconfig ~/.kube/config
|
||||
```
|
||||
|
||||
- Expose our Deployment:
|
||||
|
||||
@@ -11,16 +11,36 @@
|
||||
|
||||
- Deploy everything else:
|
||||
```bash
|
||||
set -u
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
|
||||
done
|
||||
kubectl create deployment hasher --image=dockercoins/hasher:v0.1
|
||||
kubectl create deployment rng --image=dockercoins/rng:v0.1
|
||||
kubectl create deployment webui --image=dockercoins/webui:v0.1
|
||||
kubectl create deployment worker --image=dockercoins/worker:v0.1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Deploying other images
|
||||
|
||||
- If we wanted to deploy images from another registry ...
|
||||
|
||||
- ... Or with a different tag ...
|
||||
|
||||
- ... We could use the following snippet:
|
||||
|
||||
```bash
|
||||
REGISTRY=dockercoins
|
||||
TAG=v0.1
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
|
||||
done
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Is this working?
|
||||
|
||||
- After waiting for the deployment to complete, let's look at the logs!
|
||||
|
||||
@@ -60,10 +60,12 @@
|
||||
|
||||
(by default: every minute; can be more/less frequent)
|
||||
|
||||
- If you're worried about parsing overhead: exporters can also use protobuf
|
||||
|
||||
- The list of URLs to scrape (the *scrape targets*) is defined in configuration
|
||||
|
||||
.footnote[Worried about the overhead of parsing a text format?
|
||||
<br/>
|
||||
Check this [comparison](https://github.com/RichiH/OpenMetrics/blob/master/markdown/protobuf_vs_text.md) of the text format with the (now deprecated) protobuf format!]
|
||||
|
||||
---
|
||||
|
||||
## Defining scrape targets
|
||||
|
||||
@@ -515,3 +515,24 @@ services.nodeports 0 0
|
||||
(with `kubectl describe resourcequota ...`)
|
||||
|
||||
- Rinse and repeat regularly
|
||||
|
||||
---
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [A Practical Guide to Setting Kubernetes Requests and Limits](http://blog.kubecost.com/blog/requests-and-limits/)
|
||||
|
||||
- explains what requests and limits are
|
||||
|
||||
- provides guidelines to set requests and limits
|
||||
|
||||
- gives PromQL expressions to compute good values
|
||||
<br/>(our app needs to be running for a while)
|
||||
|
||||
- [Kube Resource Report](https://github.com/hjacobs/kube-resource-report/)
|
||||
|
||||
- generates web reports on resource usage
|
||||
|
||||
- [static demo](https://hjacobs.github.io/kube-resource-report/sample-report/output/index.html)
|
||||
|
|
||||
[live demo](https://kube-resource-report.demo.j-serv.de/applications.html)
|
||||
|
||||
@@ -14,7 +14,27 @@
|
||||
|
||||
## Rolling updates
|
||||
|
||||
- With rolling updates, when a resource is updated, it happens progressively
|
||||
- With rolling updates, when a Deployment is updated, it happens progressively
|
||||
|
||||
- The Deployment controls multiple Replica Sets
|
||||
|
||||
- Each Replica Set is a group of identical Pods
|
||||
|
||||
(with the same image, arguments, parameters ...)
|
||||
|
||||
- During the rolling update, we have at least two Replica Sets:
|
||||
|
||||
- the "new" set (corresponding to the "target" version)
|
||||
|
||||
- at least one "old" set
|
||||
|
||||
- We can have multiple "old" sets
|
||||
|
||||
(if we start another update before the first one is done)
|
||||
|
||||
---
|
||||
|
||||
## Update strategy
|
||||
|
||||
- Two parameters determine the pace of the rollout: `maxUnavailable` and `maxSurge`
|
||||
|
||||
@@ -61,32 +81,6 @@
|
||||
|
||||
---
|
||||
|
||||
## Building a new version of the `worker` service
|
||||
|
||||
.warning[
|
||||
Only run these commands if you have built and pushed DockerCoins to a local registry.
|
||||
<br/>
|
||||
If you are using images from the Docker Hub (`dockercoins/worker:v0.1`), skip this.
|
||||
]
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `stacks` directory (`~/container.training/stacks`)
|
||||
|
||||
- Edit `dockercoins/worker/worker.py`; update the first `sleep` line to sleep 1 second
|
||||
|
||||
- Build a new tag and push it to the registry:
|
||||
```bash
|
||||
#export REGISTRY=localhost:3xxxx
|
||||
export TAG=v0.2
|
||||
docker-compose -f dockercoins.yml build
|
||||
docker-compose -f dockercoins.yml push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Rolling out the new `worker` service
|
||||
|
||||
.exercise[
|
||||
@@ -105,7 +99,7 @@ If you are using images from the Docker Hub (`dockercoins/worker:v0.1`), skip th
|
||||
|
||||
- Update `worker` either with `kubectl edit`, or by running:
|
||||
```bash
|
||||
kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
|
||||
kubectl set image deploy worker worker=dockercoins/worker:v0.2
|
||||
```
|
||||
|
||||
]
|
||||
@@ -146,8 +140,7 @@ That rollout should be pretty quick. What shows in the web UI?
|
||||
|
||||
- Update `worker` by specifying a non-existent image:
|
||||
```bash
|
||||
export TAG=v0.3
|
||||
kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
|
||||
kubectl set image deploy worker worker=dockercoins/worker:v0.3
|
||||
```
|
||||
|
||||
- Check what's going on:
|
||||
@@ -216,27 +209,14 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check which port the dashboard is on:
|
||||
```bash
|
||||
kubectl -n kube-system get svc socat
|
||||
```
|
||||
- Connect to the dashboard that we deployed earlier
|
||||
|
||||
- Check that we have failures in Deployments, Pods, and Replica Sets
|
||||
|
||||
- Can we see the reason for the failure?
|
||||
|
||||
]
|
||||
|
||||
Note the `3xxxx` port.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Connect to http://oneofournodes:3xxxx/
|
||||
|
||||
<!-- ```open https://node1:3xxxx/``` -->
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
- We have failures in Deployments, Pods, and Replica Sets
|
||||
|
||||
---
|
||||
|
||||
## Recovering from a bad rollout
|
||||
@@ -265,6 +245,137 @@ Note the `3xxxx` port.
|
||||
|
||||
---
|
||||
|
||||
## Rolling back to an older version
|
||||
|
||||
- We reverted to `v0.2`
|
||||
|
||||
- But this version still has a performance problem
|
||||
|
||||
- How can we get back to the previous version?
|
||||
|
||||
---
|
||||
|
||||
## Multiple "undos"
|
||||
|
||||
- What happens if we try `kubectl rollout undo` again?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try it:
|
||||
```bash
|
||||
kubectl rollout undo deployment worker
|
||||
```
|
||||
|
||||
- Check the web UI, the list of pods ...
|
||||
|
||||
]
|
||||
|
||||
🤔 That didn't work.
|
||||
|
||||
---
|
||||
|
||||
## Multiple "undos" don't work
|
||||
|
||||
- If we see successive versions as a stack:
|
||||
|
||||
- `kubectl rollout undo` doesn't "pop" the last element from the stack
|
||||
|
||||
- it copies the N-1th element to the top
|
||||
|
||||
- Multiple "undos" just swap back and forth between the last two versions!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go back to v0.2 again:
|
||||
```bash
|
||||
kubectl rollout undo deployment worker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## In this specific scenario
|
||||
|
||||
- Our version numbers are easy to guess
|
||||
|
||||
- What if we had used git hashes?
|
||||
|
||||
- What if we had changed other parameters in the Pod spec?
|
||||
|
||||
---
|
||||
|
||||
## Listing versions
|
||||
|
||||
- We can list successive versions of a Deployment with `kubectl rollout history`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at our successive versions:
|
||||
```bash
|
||||
kubectl rollout history deployment worker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We don't see *all* revisions.
|
||||
|
||||
We might see something like 1, 4, 5.
|
||||
|
||||
(Depending on how many "undos" we did before.)
|
||||
|
||||
---
|
||||
|
||||
## Explaining deployment revisions
|
||||
|
||||
- These revisions correspond to our Replica Sets
|
||||
|
||||
- This information is stored in the Replica Set annotations
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the annotations for our replica sets:
|
||||
```bash
|
||||
kubectl describe replicasets -l app=worker | grep -A3 Annotations
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about the missing revisions?
|
||||
|
||||
- The missing revisions are stored in another annotation:
|
||||
|
||||
`deployment.kubernetes.io/revision-history`
|
||||
|
||||
- These are not shown in `kubectl rollout history`
|
||||
|
||||
- We could easily reconstruct the full list with a script
|
||||
|
||||
(if we wanted to!)
|
||||
|
||||
---
|
||||
|
||||
## Rolling back to an older version
|
||||
|
||||
- `kubectl rollout undo` can work with a revision number
|
||||
|
||||
.exercise[
|
||||
|
||||
- Roll back to the "known good" deployment version:
|
||||
```bash
|
||||
kubectl rollout undo deployment worker --to-revision=1
|
||||
```
|
||||
|
||||
- Check the web UI or the list of pods
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Changing rollout parameters
|
||||
@@ -285,7 +396,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: worker
|
||||
image: $REGISTRY/worker:v0.1
|
||||
image: dockercoins/worker:v0.1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
@@ -316,7 +427,7 @@ class: extra-details
|
||||
spec:
|
||||
containers:
|
||||
- name: worker
|
||||
image: $REGISTRY/worker:v0.1
|
||||
image: dockercoins/worker:v0.1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
|
||||
@@ -61,7 +61,8 @@
|
||||
|
||||
- [minikube](https://kubernetes.io/docs/setup/minikube/),
|
||||
[kubespawn](https://github.com/kinvolk/kube-spawn),
|
||||
[Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/):
|
||||
[Docker Desktop](https://docs.docker.com/docker-for-mac/kubernetes/),
|
||||
[kind](https://kind.sigs.k8s.io):
|
||||
for local development
|
||||
|
||||
- [kubicorn](https://github.com/kubicorn/kubicorn),
|
||||
|
||||
@@ -66,7 +66,87 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## A simple volume example
|
||||
## Adding a volume to a Pod
|
||||
|
||||
- We will start with the simplest Pod manifest we can find
|
||||
|
||||
- We will add a volume to that Pod manifest
|
||||
|
||||
- We will mount that volume in a container in the Pod
|
||||
|
||||
- By default, this volume will be an `emptyDir`
|
||||
|
||||
(an empty directory)
|
||||
|
||||
- It will "shadow" the directory where it's mounted
|
||||
|
||||
---
|
||||
|
||||
## Our basic Pod
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-without-volume
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
```
|
||||
|
||||
This is an MVP! (Minimum Viable Pod😉)
|
||||
|
||||
It runs a single NGINX container.
|
||||
|
||||
---
|
||||
|
||||
## Trying the basic pod
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the Pod:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/nginx-1-without-volume.yaml
|
||||
```
|
||||
|
||||
- Get its IP address:
|
||||
```bash
|
||||
IPADDR=$(kubectl get pod nginx-without-volume -o jsonpath={.status.podIP})
|
||||
```
|
||||
|
||||
- Send a request with curl:
|
||||
```bash
|
||||
curl $IPADDR
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(We should see the "Welcome to NGINX" page.)
|
||||
|
||||
---
|
||||
|
||||
## Adding a volume
|
||||
|
||||
- We need to add the volume in two places:
|
||||
|
||||
- at the Pod level (to declare the volume)
|
||||
|
||||
- at the container level (to mount the volume)
|
||||
|
||||
- We will declare a volume named `www`
|
||||
|
||||
- No type is specified, so it will default to `emptyDir`
|
||||
|
||||
(as the name implies, it will be initialized as an empty directory at pod creation)
|
||||
|
||||
- In that pod, there is also a container named `nginx`
|
||||
|
||||
- That container mounts the volume `www` to path `/usr/share/nginx/html/`
|
||||
|
||||
---
|
||||
|
||||
## The Pod with a volume
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -86,30 +166,57 @@ spec:
|
||||
|
||||
---
|
||||
|
||||
## A simple volume example, explained
|
||||
## Trying the Pod with a volume
|
||||
|
||||
- We define a standalone `Pod` named `nginx-with-volume`
|
||||
.exercise[
|
||||
|
||||
- In that pod, there is a volume named `www`
|
||||
- Create the Pod:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/nginx-2-with-volume.yaml
|
||||
```
|
||||
|
||||
- No type is specified, so it will default to `emptyDir`
|
||||
- Get its IP address:
|
||||
```bash
|
||||
IPADDR=$(kubectl get pod nginx-with-volume -o jsonpath={.status.podIP})
|
||||
```
|
||||
|
||||
(as the name implies, it will be initialized as an empty directory at pod creation)
|
||||
- Send a request with curl:
|
||||
```bash
|
||||
curl $IPADDR
|
||||
```
|
||||
|
||||
- In that pod, there is also a container named `nginx`
|
||||
]
|
||||
|
||||
- That container mounts the volume `www` to path `/usr/share/nginx/html/`
|
||||
(We should now see a "403 Forbidden" error page.)
|
||||
|
||||
---
|
||||
|
||||
## A volume shared between two containers
|
||||
## Populating the volume with another container
|
||||
|
||||
- Let's add another container to the Pod
|
||||
|
||||
- Let's mount the volume in *both* containers
|
||||
|
||||
- That container will populate the volume with static files
|
||||
|
||||
- NGINX will then serve these static files
|
||||
|
||||
- To populate the volume, we will clone the Spoon-Knife repository
|
||||
|
||||
- this repository is https://github.com/octocat/Spoon-Knife
|
||||
|
||||
- it's very popular (more than 100K stars!)
|
||||
|
||||
---
|
||||
|
||||
## Sharing a volume between two containers
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-volume
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
@@ -147,30 +254,72 @@ spec:
|
||||
|
||||
---
|
||||
|
||||
## Sharing a volume, in action
|
||||
## Trying the shared volume
|
||||
|
||||
- Let's try it!
|
||||
- This one will be time-sensitive!
|
||||
|
||||
- We need to catch the Pod IP address *as soon as it's created*
|
||||
|
||||
- Then send a request to it *as fast as possible*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the pod by applying the YAML file:
|
||||
- Watch the pods (so that we can catch the Pod IP address)
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/nginx-with-volume.yaml
|
||||
kubectl get pods -o wide --watch
|
||||
```
|
||||
|
||||
- Check the IP address that was allocated to our pod:
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Shared volume in action
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the pod:
|
||||
```bash
|
||||
kubectl get pod nginx-with-volume -o wide
|
||||
IP=$(kubectl get pod nginx-with-volume -o json | jq -r .status.podIP)
|
||||
kubectl create -f ~/container.training/k8s/nginx-3-with-git.yaml
|
||||
```
|
||||
|
||||
- Access the web server:
|
||||
- As soon as we see its IP address, access it:
|
||||
```bash
|
||||
curl $IP
|
||||
```
|
||||
|
||||
- A few seconds later, the state of the pod will change; access it again:
|
||||
```bash
|
||||
curl $IP
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The first time, we should see "403 Forbidden".
|
||||
|
||||
The second time, we should see the HTML file from the Spoon-Knife repository.
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- Both containers are started at the same time
|
||||
|
||||
- NGINX starts very quickly
|
||||
|
||||
(it can serve requests immediately)
|
||||
|
||||
- But at this point, the volume is empty
|
||||
|
||||
(NGINX serves "403 Forbidden")
|
||||
|
||||
- The other container installs git and clones the repository
|
||||
|
||||
(this takes a bit longer)
|
||||
|
||||
- When the other container is done, the volume holds the repository
|
||||
|
||||
(NGINX serves the HTML file)
|
||||
|
||||
---
|
||||
|
||||
## The devil is in the details
|
||||
@@ -183,13 +332,100 @@ spec:
|
||||
|
||||
- That's why we specified `restartPolicy: OnFailure`
|
||||
|
||||
---
|
||||
|
||||
## Inconsistencies
|
||||
|
||||
- There is a short period of time during which the website is not available
|
||||
|
||||
(because the `git` container hasn't done its job yet)
|
||||
|
||||
- This could be avoided by using [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
|
||||
- With a bigger website, we could get inconsistent results
|
||||
|
||||
(we will see a live example in a few sections)
|
||||
(where only a part of the content is ready)
|
||||
|
||||
- In real applications, this could cause incorrect results
|
||||
|
||||
- How can we avoid that?
|
||||
|
||||
---
|
||||
|
||||
## Init Containers
|
||||
|
||||
- We can define containers that should execute *before* the main ones
|
||||
|
||||
- They will be executed in order
|
||||
|
||||
(instead of in parallel)
|
||||
|
||||
- They must all succeed before the main containers are started
|
||||
|
||||
- This is *exactly* what we need here!
|
||||
|
||||
- Let's see one in action
|
||||
|
||||
.footnote[See [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) documentation for all the details.]
|
||||
|
||||
---
|
||||
|
||||
## Defining Init Containers
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-init
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Trying the init container
|
||||
|
||||
- Repeat the same operation as earlier
|
||||
|
||||
(try to send HTTP requests as soon as the pod comes up)
|
||||
|
||||
- This time, instead of "403 Forbidden" we get a "connection refused"
|
||||
|
||||
- NGINX doesn't start until the git container has done its job
|
||||
|
||||
- We never get inconsistent results
|
||||
|
||||
(a "half-ready" container)
|
||||
|
||||
---
|
||||
|
||||
## Other uses of init containers
|
||||
|
||||
- Load content
|
||||
|
||||
- Generate configuration (or certificates)
|
||||
|
||||
- Database migrations
|
||||
|
||||
- Waiting for other services to be up
|
||||
|
||||
(to avoid flurry of connection errors in main container)
|
||||
|
||||
- etc.
|
||||
|
||||
---
|
||||
|
||||
|
||||
93
slides/k8s/yamldeploy.md
Normal file
93
slides/k8s/yamldeploy.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Deploying with YAML
|
||||
|
||||
- So far, we created resources with the following commands:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
- `kubectl create deployment`
|
||||
|
||||
- `kubectl expose`
|
||||
|
||||
- We can also create resources directly with YAML manifests
|
||||
|
||||
---
|
||||
|
||||
## `kubectl apply` vs `create`
|
||||
|
||||
- `kubectl create -f whatever.yaml`
|
||||
|
||||
- creates resources if they don't exist
|
||||
|
||||
- if resources already exist, don't alter them
|
||||
<br/>(and display error message)
|
||||
|
||||
- `kubectl apply -f whatever.yaml`
|
||||
|
||||
- creates resources if they don't exist
|
||||
|
||||
- if resources already exist, update them
|
||||
<br/>(to match the definition provided by the YAML file)
|
||||
|
||||
- stores the manifest as an *annotation* in the resource
|
||||
|
||||
---
|
||||
|
||||
## Creating multiple resources
|
||||
|
||||
- The manifest can contain multiple resources separated by `---`
|
||||
|
||||
```yaml
|
||||
kind: ...
|
||||
apiVersion: ...
|
||||
metadata: ...
|
||||
name: ...
|
||||
...
|
||||
---
|
||||
kind: ...
|
||||
apiVersion: ...
|
||||
metadata: ...
|
||||
name: ...
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Creating multiple resources
|
||||
|
||||
- The manifest can also contain a list of resources
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: List
|
||||
items:
|
||||
- kind: ...
|
||||
apiVersion: ...
|
||||
...
|
||||
- kind: ...
|
||||
apiVersion: ...
|
||||
...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Deploying dockercoins with YAML
|
||||
|
||||
- We provide a YAML manifest with all the resources for Dockercoins
|
||||
|
||||
(Deployments and Services)
|
||||
|
||||
- We can use it if we need to deploy or redeploy Dockercoins
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy or redeploy Dockercoins:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/dockercoins.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(If we deployed Dockercoins earlier, we will see warning messages,
|
||||
because the resources that we created lack the necessary annotation.
|
||||
We can safely ignore them.)
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for Admins and Ops
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- static-pods-exercise
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/dmuc.md
|
||||
- - k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- k8s/apilb.md
|
||||
- k8s/control-plane-auth.md
|
||||
- - k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/staticpods.md
|
||||
- k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/bootstrap.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- - k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
@@ -1,69 +0,0 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
for administrators
|
||||
and operators
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
# DAY 1
|
||||
- - k8s/prereqs-admin.md
|
||||
- k8s/architecture.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/dmuc.md
|
||||
- - k8s/multinode.md
|
||||
- k8s/cni.md
|
||||
- - k8s/apilb.md
|
||||
- k8s/setup-managed.md
|
||||
- k8s/setup-selfhosted.md
|
||||
- k8s/cluster-upgrade.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/cluster-backup.md
|
||||
- k8s/cloud-controller-manager.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
# DAY 2
|
||||
- - k8s/kubercoins.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/csr-api.md
|
||||
- - k8s/openid-connect.md
|
||||
- k8s/control-plane-auth.md
|
||||
###- k8s/bootstrap.md
|
||||
- k8s/netpol.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- - k8s/prometheus.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/operators.md
|
||||
###- k8s/operators-design.md
|
||||
# CONCLUSION
|
||||
- - k8s/lastwords-admin.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
- |
|
||||
# (All content after this slide is bonus material)
|
||||
# EXTRA
|
||||
- - k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
@@ -1,6 +1,5 @@
|
||||
title: |
|
||||
Deploying and Scaling Microservices
|
||||
with Kubernetes
|
||||
Kubernetes
|
||||
|
||||
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
@@ -8,7 +7,9 @@ chat: "In person!"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
slides: http://kube-2019-11.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
@@ -23,7 +24,7 @@ chapters:
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
@@ -42,16 +43,18 @@ chapters:
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
#- k8s/kubectlproxy.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/accessinternal.md
|
||||
-
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/dashboard.md
|
||||
#- k8s/dashboard.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
#- k8s/dryrun.md
|
||||
#- k8s/kubectlproxy.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/accessinternal.md
|
||||
- k8s/rollout.md
|
||||
#- k8s/healthchecks.md
|
||||
#- k8s/healthchecks-more.md
|
||||
@@ -86,3 +89,7 @@ chapters:
|
||||
- k8s/whatsnext.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
- |
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
@@ -10,6 +10,8 @@ gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@ gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
|
||||
@@ -43,15 +45,17 @@ chapters:
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
-
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/dashboard.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
-
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
|
||||
@@ -10,6 +10,8 @@ gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
@@ -23,7 +25,7 @@ chapters:
|
||||
- shared/prereqs.md
|
||||
#- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
@@ -43,19 +45,21 @@ chapters:
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
-
|
||||
- k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/yamldeploy.md
|
||||
#- k8s/setup-k8s.md
|
||||
- k8s/dashboard.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/dryrun.md
|
||||
-
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/healthchecks-more.md
|
||||
#- k8s/healthchecks-more.md
|
||||
- k8s/record.md
|
||||
-
|
||||
- k8s/namespaces.md
|
||||
|
||||
@@ -1,35 +1,13 @@
|
||||
## Intros
|
||||
|
||||
- This slide should be customized by the tutorial instructor(s).
|
||||
- Hello! I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
|
||||
|
||||
- Hello! We are:
|
||||
- The workshop will run from 9:00 to 17:00
|
||||
|
||||
- .emoji[👩🏻🏫] Ann O'Nymous ([@...](https://twitter.com/...), Megacorp Inc)
|
||||
- There will be a lunch break at 12:30
|
||||
|
||||
- .emoji[👨🏾🎓] Stu Dent ([@...](https://twitter.com/...), University of Wakanda)
|
||||
|
||||
<!-- .dummy[
|
||||
|
||||
- .emoji[👷🏻♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)
|
||||
|
||||
- .emoji[🚁] Alexandre ([@alexbuisine](https://twitter.com/alexbuisine), Enix SAS)
|
||||
|
||||
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
|
||||
|
||||
- .emoji[⛵] Jérémy ([@jeremygarrouste](twitter.com/jeremygarrouste), Inpiwee)
|
||||
|
||||
- .emoji[🎧] Romain ([@rdegez](https://twitter.com/rdegez), Enix SAS)
|
||||
|
||||
] -->
|
||||
|
||||
- The workshop will run from ...
|
||||
|
||||
- There will be a lunch break at ...
|
||||
|
||||
(And coffee breaks!)
|
||||
(And coffee breaks around 10:30 and 15:30)
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
@@ -80,7 +80,7 @@ def flatten(titles):
|
||||
|
||||
|
||||
def generatefromyaml(manifest, filename):
|
||||
manifest = yaml.load(manifest)
|
||||
manifest = yaml.safe_load(manifest)
|
||||
|
||||
markdown, titles = processchapter(manifest["chapters"], filename)
|
||||
logging.debug("Found {} titles.".format(len(titles)))
|
||||
@@ -111,6 +111,7 @@ def generatefromyaml(manifest, filename):
|
||||
html = html.replace("@@GITREPO@@", manifest["gitrepo"])
|
||||
html = html.replace("@@SLIDES@@", manifest["slides"])
|
||||
html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
|
||||
html = html.replace("@@SLIDENUMBERPREFIX@@", manifest.get("slidenumberprefix", ""))
|
||||
return html
|
||||
|
||||
|
||||
|
||||
@@ -4,7 +4,12 @@ class: in-person
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the first VM (`node1`) with your SSH client
|
||||
- Log into the first VM (`node1`) with your SSH client:
|
||||
```bash
|
||||
ssh `user`@`A.B.C.D`
|
||||
```
|
||||
|
||||
(Replace `user` and `A.B.C.D` with the user and IP address provided to you)
|
||||
|
||||
<!--
|
||||
```bash
|
||||
@@ -18,16 +23,13 @@ done
|
||||
```
|
||||
-->
|
||||
|
||||
- Check that you can SSH (without password) to `node2`:
|
||||
```bash
|
||||
ssh node2
|
||||
```
|
||||
- Type `exit` or `^D` to come back to `node1`
|
||||
|
||||
<!-- ```bash exit``` -->
|
||||
|
||||
]
|
||||
|
||||
You should see a prompt looking like this:
|
||||
```
|
||||
[A.B.C.D] (...) user@node1 ~
|
||||
$
|
||||
```
|
||||
If anything goes wrong — ask for help!
|
||||
|
||||
---
|
||||
@@ -52,6 +54,20 @@ If anything goes wrong — ask for help!
|
||||
|
||||
---
|
||||
|
||||
## For a consistent Kubernetes experience ...
|
||||
|
||||
- If you are using your own Kubernetes cluster, you can use [shpod](https://github.com/jpetazzo/shpod)
|
||||
|
||||
- `shpod` provides a shell running in a pod on your own cluster
|
||||
|
||||
- It comes with many tools pre-installed (helm, stern...)
|
||||
|
||||
- These tools are used in many exercises in these slides
|
||||
|
||||
- `shpod` also gives you completion and a fancy prompt
|
||||
|
||||
---
|
||||
|
||||
class: self-paced
|
||||
|
||||
## Get your own Docker nodes
|
||||
|
||||
@@ -50,10 +50,6 @@ Misattributed to Benjamin Franklin
|
||||
|
||||
- Go to @@SLIDES@@ to view these slides
|
||||
|
||||
- Join the chat room: @@CHAT@@
|
||||
|
||||
<!-- ```open @@SLIDES@@``` -->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -11,11 +11,5 @@ class: title, in-person
|
||||
@@TITLE@@<br/></br>
|
||||
|
||||
.footnote[
|
||||
**Be kind to the WiFi!**<br/>
|
||||
<!-- *Use the 5G network.* -->
|
||||
*Don't use your hotspot.*<br/>
|
||||
*Don't stream videos or download big files during the workshop[.](https://www.youtube.com/watch?v=h16zyxiwDLY)*<br/>
|
||||
*Thank you!*
|
||||
|
||||
**Slides: @@SLIDES@@**
|
||||
]
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-auto
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- - swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/gui.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,64 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
- snap
|
||||
- btp-manual
|
||||
- benchmarking
|
||||
- elk-manual
|
||||
- prom-manual
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
#- swarm/hostingregistry.md
|
||||
#- swarm/testingregistry.md
|
||||
#- swarm/btp-manual.md
|
||||
#- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- swarm/updatingservices.md
|
||||
#- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/ipsec.md
|
||||
#- swarm/swarmtools.md
|
||||
- swarm/security.md
|
||||
#- swarm/secrets.md
|
||||
#- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
#- swarm/stateful.md
|
||||
#- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,73 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
#- shared/logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- swarm/cicd.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
- swarm/netshoot.md
|
||||
- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
- swarm/logging.md
|
||||
- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -1,72 +0,0 @@
|
||||
title: |
|
||||
Container Orchestration
|
||||
with Docker and Swarm
|
||||
|
||||
chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: http://container.training/
|
||||
|
||||
exclude:
|
||||
- in-person
|
||||
- btp-auto
|
||||
|
||||
chapters:
|
||||
- shared/title.md
|
||||
#- shared/logistics.md
|
||||
- swarm/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 1
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- swarm/swarmkit.md
|
||||
- shared/declarative.md
|
||||
- swarm/swarmmode.md
|
||||
- swarm/creatingswarm.md
|
||||
#- swarm/machine.md
|
||||
- swarm/morenodes.md
|
||||
- - swarm/firstservice.md
|
||||
- swarm/ourapponswarm.md
|
||||
- swarm/hostingregistry.md
|
||||
- swarm/testingregistry.md
|
||||
- swarm/btp-manual.md
|
||||
- swarm/swarmready.md
|
||||
- swarm/stacks.md
|
||||
- |
|
||||
name: part-2
|
||||
|
||||
class: title, self-paced
|
||||
|
||||
Part 2
|
||||
- - swarm/operatingswarm.md
|
||||
#- swarm/netshoot.md
|
||||
#- swarm/swarmnbt.md
|
||||
- swarm/ipsec.md
|
||||
- swarm/updatingservices.md
|
||||
- swarm/rollingupdates.md
|
||||
#- swarm/healthchecks.md
|
||||
- swarm/nodeinfo.md
|
||||
- swarm/swarmtools.md
|
||||
- - swarm/security.md
|
||||
- swarm/secrets.md
|
||||
- swarm/encryptionatrest.md
|
||||
- swarm/leastprivilege.md
|
||||
- swarm/apiscope.md
|
||||
#- swarm/logging.md
|
||||
#- swarm/metrics.md
|
||||
- swarm/stateful.md
|
||||
- swarm/extratips.md
|
||||
- shared/thankyou.md
|
||||
- swarm/links.md
|
||||
@@ -109,8 +109,8 @@ div.pic p {
|
||||
div.pic img {
|
||||
display: block;
|
||||
margin: auto;
|
||||
max-width: 1210px;
|
||||
max-height: 550px;
|
||||
max-width: 100%;
|
||||
max-height: 100%;
|
||||
}
|
||||
div.pic h1, div.pic h2, div.title h1, div.title h2 {
|
||||
text-align: center;
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
var slideshow = remark.create({
|
||||
ratio: '16:9',
|
||||
highlightSpans: true,
|
||||
slideNumberFormat: '@@SLIDENUMBERPREFIX@@%current%/%total%',
|
||||
excludedClasses: [@@EXCLUDE@@]
|
||||
});
|
||||
</script>
|
||||
|
||||
Reference in New Issue
Block a user