Compare commits
14 Commits
2025-01-en
...
2022-09-nr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5af30c64bb | ||
|
|
75c5964c30 | ||
|
|
b112c1fae6 | ||
|
|
b4d837bbf5 | ||
|
|
dda21fee01 | ||
|
|
da2806ea93 | ||
|
|
d983592ddc | ||
|
|
d759703f9a | ||
|
|
ffbecd9e04 | ||
|
|
6a235fae44 | ||
|
|
d83a6232c4 | ||
|
|
7b7c755b95 | ||
|
|
6d0849eebb | ||
|
|
b46dcd5157 |
22
k8s/affinity-pod.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: aff-pod
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
affinity:
|
||||
containers:
|
||||
- name: aff-pod
|
||||
image: alpine
|
||||
command:
|
||||
- sleep
|
||||
args:
|
||||
- "1000"
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: cow
|
||||
operator: In
|
||||
values:
|
||||
- elsie
|
||||
22
k8s/init-container.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: initty
|
||||
spec:
|
||||
volumes:
|
||||
- name: preFetched
|
||||
emptyDir: {}
|
||||
|
||||
containers:
|
||||
- name: main
|
||||
image: main
|
||||
volumeMounts:
|
||||
- name: preFetched
|
||||
mountPath: /usr/share/nginx/html/
|
||||
initContainers:
|
||||
- name: git-cloner
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /preFetched" ]
|
||||
volumeMounts:
|
||||
- name: preFetched
|
||||
mountPath: /preFetched/
|
||||
18
k8s/k8s-nr-kubeconfig.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lDQm5Vd0RRWUpLb1pJaHZjTkFRRUxCUUF3TXpFVk1CTUdBMVVFQ2hNTVJHbG4KYVhSaGJFOWpaV0Z1TVJvd0dBWURWUVFERXhGck9ITmhZWE1nUTJ4MWMzUmxjaUJEUVRBZUZ3MHlNakE1TVRneQpNekV6TWpGYUZ3MDBNakE1TVRneU16RXpNakZhTURNeEZUQVRCZ05WQkFvVERFUnBaMmwwWVd4UFkyVmhiakVhCk1CZ0dBMVVFQXhNUmF6aHpZV0Z6SUVOc2RYTjBaWElnUTBFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUIKRHdBd2dnRUtBb0lCQVFEYnVlN1MzRS9hdFpvQVJjSUllRFJNMG5vMThvaDNEL3cyV3VWQmNaQWppZXhmNGw4VQpldEZlWDBWQmZFZGJqUndIWTYva2VHdHVzS0dXUzNZdUN5RHd3WFNhMEV5NS9LM0ZLUHhEUkdyUWJSNXJkUWg5CmI4NW1IbXVIcUYvQXJHMWJVV2JYQmFRVVhBdXNtMVpjMnNtOXdWQm0vRlRJSTJDdEpReTViVXVIQnY3N01BNHEKUzV3b1liMXkwUHo0OXNuVldiY3BXZ1FxR080SE9JelFJc2crakxYR0lhWi96L0lneHR2M0ZYaVJVUlVIZWhERwplTTVuRDErY1JuUkorcDlLQU9VMUdOZzQwVENoN3hjaGo3UHNJMDV1Q0xVQWFhYVJ4M0pVRFBpRXgxWjVjOHQwCll6aTBXTVVTUVpkTjlUc3UrNGZZaXAyTFpkZGxXOW1ma0NYREFnTUJBQUdqUlRCRE1BNEdBMVVkRHdFQi93UUUKQXdJQmhqQVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUFNQjBHQTFVZERnUVdCQlNpcEo3SHZQTkRZMWcrcDNEdwp0TUEvNThmUmFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFuYkNYSHUvM3YrbXRlU3N4TXFxUndJd1c0T015CkdRdzE0aERtYkFRcmovYVo0WkFvZUJIdFJSMGYxTFFXQnVIQTBtTFJvSTFSenpBQWw3V2lNMDd6VU1ETlV2enUKR0FCVmtwOEV6b2RneTlNclFkN2VtZkNJRFA3SkhZV1FzL1VxcGVVZW4zcHljQ3dXZFFXY3ZDR0FtTEZZSzI3TApKcnFKV1JXNGErWTVDUkhqVytzTGJpeTNNMTdrOHVWM1pzMktNS0FUaVNXWUZTUzUrSkg5Tk5WdXNKd1lUZVZPCmJOZG5PbS9ub1NLejYrbHUvUm1NK0NsUFdXakdXcUlHdHZyNFl6b0puZk52UDNXL01FQXlzY3Zlck9jcXUxWTAKa1dmRkg2azVlY3NsK2k1RTFkaE02U0JRaFZzV1crMjFlN1plbVJwc1htNkNyYUZqek4vSFlaMEMzdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
server: https://8f36cb5d-e565-452a-a09c-81760683c1f9.k8s.ondigitalocean.com
|
||||
name: do-sfo3-k8s-nr
|
||||
contexts:
|
||||
- context:
|
||||
cluster: do-sfo3-k8s-nr
|
||||
user: do-sfo3-k8s-nr-admin
|
||||
name: do-sfo3-k8s-nr
|
||||
current-context: do-sfo3-k8s-nr
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: do-sfo3-k8s-nr-admin
|
||||
user:
|
||||
token: dop_v1_dc6f141491e1e3447a52ec192c3424c0481622f5430cf219fb38458280e1ff88
|
||||
23
k8s/multiLine.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
run: busybox
|
||||
name: busybox
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "running below scripts"
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)";
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
name: busybox
|
||||
image: busybox
|
||||
22
k8s/multiLine2.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
run: busybox
|
||||
name: busybox
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
echo "running below scripts"
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)";
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
name: busybox
|
||||
image: busybox
|
||||
@@ -3,11 +3,13 @@ kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-volume
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
@@ -3,8 +3,9 @@ kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
terminationGracePeriodSeconds: 0
|
||||
restartPolicy: OnFailure
|
||||
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
@@ -17,5 +18,9 @@ spec:
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
restartPolicy: OnFailure
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
|
||||
|
||||
@@ -3,14 +3,8 @@ kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-init
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
terminationGracePeriodSeconds: 0
|
||||
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
@@ -18,3 +12,15 @@ spec:
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
|
||||
28
k8s/nginx-5-with-hostpath.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: hostpath-nginx
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
hostPath:
|
||||
path: /home/k8s/myFiles
|
||||
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: myData
|
||||
operator: In
|
||||
values:
|
||||
- present
|
||||
|
||||
|
||||
27
k8s/nginx-git.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
- name: git
|
||||
image: alpine
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
apk add git &&
|
||||
git clone https://github.com/octocat/Spoon-Knife /www
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
28
k8s/nginx-init.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
apk add git &&
|
||||
git clone https://github.com/octocat/Spoon-Knife /www
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
8
k8s/nginx.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: my-web
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
19
k8s/ping.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: ping
|
||||
name: ping
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- command:
|
||||
- ping
|
||||
args:
|
||||
- 127.0.0.1
|
||||
image: alpine
|
||||
name: ping
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
status: {}
|
||||
18
k8s/sampleYaml.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: gerry
|
||||
citizenship: US
|
||||
height-in-cm: 197
|
||||
coder: true
|
||||
friends:
|
||||
- Moe
|
||||
- Larry
|
||||
- Curly
|
||||
employees:
|
||||
- name: Moe
|
||||
position: dev
|
||||
- name: Larry
|
||||
position: ops
|
||||
- name: Curly
|
||||
position: devOps
|
||||
poem: |
|
||||
Mary had a little lamb
|
||||
It was very cute
|
||||
26
k8s/sampleYamlAsJson.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "gerry",
|
||||
"citizenship": "US",
|
||||
"height-in-cm": 197,
|
||||
"coder": true,
|
||||
"friends": [
|
||||
"Moe",
|
||||
"Larry",
|
||||
"Curly"
|
||||
],
|
||||
"employees": [
|
||||
{
|
||||
"name": "Moe",
|
||||
"position": "dev"
|
||||
},
|
||||
{
|
||||
"name": "Larry",
|
||||
"position": "ops"
|
||||
},
|
||||
{
|
||||
"name": "Curly",
|
||||
"position": "devOps"
|
||||
}
|
||||
],
|
||||
"poem": "Mary had a little lamb\nIt was very cute\n"
|
||||
}
|
||||
@@ -2,6 +2,7 @@
|
||||
#/ /kube-halfday.yml.html 200!
|
||||
#/ /kube-fullday.yml.html 200!
|
||||
#/ /kube-twodays.yml.html 200!
|
||||
/ /kube.yml.html 200!
|
||||
|
||||
# And this allows to do "git clone https://container.training".
|
||||
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack
|
||||
|
||||
@@ -100,7 +100,11 @@ _We will give more details about namespaces and cgroups later._
|
||||
* But it is easier to use `docker exec`.
|
||||
|
||||
```bash
|
||||
$ docker exec -ti ticktock sh
|
||||
$ docker ps -lq # Get Last Container ID
|
||||
17e4e95e2702
|
||||
$ docker exec 17
|
||||
|
||||
$ docker exec -ti $(docker ps -lq) sh # bash-fu version
|
||||
```
|
||||
|
||||
* This creates a new process (running `sh`) _inside_ the container.
|
||||
|
||||
20
slides/containers/High_Level_View.md
Normal file
@@ -0,0 +1,20 @@
|
||||
|
||||
class: title
|
||||
|
||||
# High Level Discussion
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## White Board Topics
|
||||
|
||||
* What is the real problem that containers solve?
|
||||
* What are the inputs to a Unix Process?
|
||||
* What is the init Process?
|
||||
* Userland vs Kernel
|
||||
* The Root File System
|
||||
* What is an Overlay File System?
|
||||
* Wrapping it all up to represent a container image
|
||||
* Deploying Container images
|
||||
|
||||
318
slides/containers/Macro_View.md
Normal file
@@ -0,0 +1,318 @@
|
||||
|
||||
|
||||
class: title
|
||||
|
||||
# A Macroscopic View
|
||||
|
||||
---
|
||||
|
||||
## Macroscopic Items
|
||||
|
||||
* The business case for containers
|
||||
|
||||
* The problem containers are solving
|
||||
|
||||
* What applications need
|
||||
|
||||
* What is the OS doing provides?
|
||||
|
||||
---
|
||||
|
||||
## What do CIOs worry about?
|
||||
|
||||
Who are the CIO's customers?
|
||||
|
||||
* Business Units: Need Computers to Run Applications
|
||||
* Peak Capacity
|
||||
|
||||
* CFO: Demanding Budget Justifications
|
||||
* Spend Less
|
||||
|
||||
---
|
||||
|
||||
## History of Solutions
|
||||
|
||||
For Each Business Application Buy a Machine
|
||||
|
||||
* Buy a machine for each application
|
||||
|
||||
* Big enough for Peak Load (CPU, Memory, Disk)
|
||||
|
||||
The Age of VMs
|
||||
|
||||
* Buy bigger machines and chop them up into logical machines
|
||||
|
||||
* Distribute your applications as VMs theses machines
|
||||
|
||||
* Observe what and when the application load actually is
|
||||
|
||||
* Possibly rebalance be to inform possibly moving
|
||||
|
||||
But Maintaining Machines (Bare Metal or VM) is hard (Patches, Packages, Drivers, etc)
|
||||
|
||||
---
|
||||
|
||||
## What Developers and Ops worry about
|
||||
|
||||
* Getting Software deployed
|
||||
|
||||
* Mysterious reasons why deployed application doesn't work
|
||||
|
||||
* Developer to Ops:
|
||||
|
||||
* "Hey it works on my development machine..."
|
||||
|
||||
* "I don't know why it isn't working for ***you***"
|
||||
|
||||
* "Everything ***looks*** the same"
|
||||
|
||||
* "I have no idea what could be different"
|
||||
|
||||
---
|
||||
|
||||
## The History of Software Deployment
|
||||
|
||||
Software Deployment is just a reproducible way to install files:
|
||||
|
||||
* Cards
|
||||
|
||||
* Tapes
|
||||
|
||||
* Floppy Disks
|
||||
|
||||
* Zip/Tar Files
|
||||
|
||||
* Installation "Files" (rpm/deb/msi)
|
||||
|
||||
* VM Images
|
||||
|
||||
---
|
||||
|
||||
## What is the Problem Containers are Solving?
|
||||
|
||||
It depends on who you are:
|
||||
|
||||
* For the CIO: Better resource utilization
|
||||
|
||||
* For Ops: Software Distribution
|
||||
|
||||
* For the Developer & Ops: Reproducible Environment
|
||||
|
||||
<BR><BR>
|
||||
|
||||
Ummm, but what exactly are containers....
|
||||
|
||||
* Wait a few more slides...
|
||||
|
||||
---
|
||||
|
||||
## Macroscopic view: Applications and the OS
|
||||
|
||||
Applications:
|
||||
|
||||
* What are the inputs/outputs to a program?
|
||||
|
||||
The OS:
|
||||
|
||||
* What does the OS provide?
|
||||
|
||||
---
|
||||
|
||||
## What are the inputs/outputs to a program?
|
||||
|
||||
Explicitly:
|
||||
* Command Line Arguments
|
||||
* Environment Variables
|
||||
* Standard In
|
||||
* Standard Out/Err
|
||||
|
||||
Implicitly (via the File System):
|
||||
|
||||
* Configuration Files
|
||||
* Other Installed Applications
|
||||
* Any other files
|
||||
|
||||
Also Implicitly
|
||||
|
||||
* Memory
|
||||
* Network
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## What does the OS provide?
|
||||
|
||||
* OS Kernel
|
||||
* Kernel loded at boot time
|
||||
* Sets up disk drives, network cards, other hardware, etc
|
||||
* Manages all hardware, processes, memory, etc
|
||||
* Kernel Space
|
||||
* Low level innards of Kernel (fluid internal API)
|
||||
* No direct access by applications of most Kernel functionality
|
||||
|
||||
|
||||
* User Space (userland) Processes
|
||||
* Code running outside the Kernel
|
||||
* Very stable shim library access from User Space to Kernel Space (Think "fopen")
|
||||
|
||||
* The "init" Process
|
||||
* User Space Process run after Kernel has booted
|
||||
* Always PID 1
|
||||
|
||||
---
|
||||
|
||||
## OS Processes
|
||||
|
||||
* Created when an application is launched
|
||||
* Each has a unique Process ID (PID)
|
||||
|
||||
* Provides it its own logical 'view' of all implicit inputs/output when launching app
|
||||
* File System ( root directory, / )
|
||||
* Memory
|
||||
* Network Adaptors
|
||||
* Other running processes
|
||||
|
||||
---
|
||||
|
||||
## What do we mean by "The OS"
|
||||
|
||||
Different Linux's
|
||||
|
||||
* Ubuntu / Debian; Centos / RHEL; Raspberry Pi; etc
|
||||
|
||||
What do they have in common?
|
||||
|
||||
* They all have a kernel that provides access to Userland (ie fopen)
|
||||
|
||||
* They typically have all the commands (bash, sh, ls, grep, ...)
|
||||
|
||||
What may be different?
|
||||
|
||||
* May use different versions of the Kernel (4.18, 5.4, ...)
|
||||
* Internally different, but providing same Userland API
|
||||
|
||||
* Many other bundled commands, packages and package management tools
|
||||
* Namely what makes it 'Debian' vs 'Centos'
|
||||
|
||||
---
|
||||
|
||||
## What might a 'Minimal' Linux be?
|
||||
|
||||
You could actually just have:
|
||||
|
||||
* A Linux Kernel
|
||||
|
||||
* An application (for simplicity a statically linked C program)
|
||||
|
||||
* The kernel configured to run that application as its 'init' process
|
||||
|
||||
Would you ever do this?
|
||||
|
||||
* Why not?
|
||||
|
||||
* It certainly would be very secure
|
||||
|
||||
---
|
||||
|
||||
## So Finally... What are Containers?
|
||||
|
||||
Containers just a Linux process that 'thinks' it is it's own machine
|
||||
|
||||
* With its own 'view' of things like:
|
||||
* File System ( root directory, / ), Memory, Network Adaptors, Other running processes
|
||||
|
||||
* Leverages our understanding that a (logical) Linux Machine is
|
||||
* A kernel
|
||||
* A bunch of files ( Maybe a few Environment Variables )
|
||||
|
||||
Since it is a process running on a host machine
|
||||
|
||||
* It uses the kernel of the host machine
|
||||
* And of course you need some tools to create the running container process
|
||||
|
||||
---
|
||||
|
||||
## Container Runtimes and Container Images
|
||||
|
||||
The Linux kernel actually has no concept of a container.
|
||||
|
||||
* There have been many 'container' technologies
|
||||
|
||||
* See [A Brief History of containers: From the 1970's till now](https://blog.aquasec.com/a-brief-history-of-containers-from-1970s-chroot-to-docker-2016)
|
||||
|
||||
* Over the years more capabilities have been added to the kernel to make it easier
|
||||
|
||||
<BR>
|
||||
A 'Container technology' is:
|
||||
|
||||
* A Container Image Format of the unit of software deployment
|
||||
* A bundle of all the files and miscellaneous configuration
|
||||
|
||||
* A Container Runtime Engine
|
||||
* Software that takes a Container Image and creates a running container
|
||||
|
||||
---
|
||||
|
||||
## The Container Runtime War is now Over
|
||||
|
||||
The Cloud Native Computing Foundation (CNCF) has standardized containers
|
||||
|
||||
* A standard container image format
|
||||
|
||||
* A standard for building and configuring container runtimes
|
||||
|
||||
* A standard REST API for loading/downloading container image to a registries
|
||||
|
||||
There primary Container Runtimes are:
|
||||
|
||||
* containerd: using the 'docker' Command Line Interface (or Kubernetes)
|
||||
|
||||
* CRI-O: using the 'podman' Command Line Interface (or Kubernetes/OpenShift)
|
||||
|
||||
* Others exists, for example Singularity which has a history in HPC
|
||||
|
||||
---
|
||||
|
||||
## Linux Namespaces Makes Containers Possible
|
||||
|
||||
- Provide processes with their own isolated view of the system.
|
||||
|
||||
- Namespaces limit what you can see (and therefore, what you can use).
|
||||
|
||||
- These namespaces are available in modern kernels:
|
||||
|
||||
- pid: processes
|
||||
- net: network
|
||||
- mnt: root file system (ie chroot)
|
||||
- uts: hostname
|
||||
- ipc
|
||||
- user: UID/GID mapping
|
||||
- time: time
|
||||
- cgroup: Resource Monitoring and Limiting
|
||||
|
||||
- Each process belongs to one namespace of each type.
|
||||
|
||||
---
|
||||
|
||||
## Namespaces are always active
|
||||
|
||||
- Namespaces exist even when you don't use containers.
|
||||
|
||||
- This is a bit similar to the UID field in UNIX processes:
|
||||
|
||||
- all processes have the UID field, even if no user exists on the system
|
||||
|
||||
- the field always has a value / the value is always defined
|
||||
<br/>
|
||||
(i.e. any process running on the system has some UID)
|
||||
|
||||
- the value of the UID field is used when checking permissions
|
||||
<br/>
|
||||
(the UID field determines which resources the process can access)
|
||||
|
||||
- You can replace "UID field" with "namespace" above and it still works!
|
||||
|
||||
- In other words: even when you don't use containers,
|
||||
<br/>there is one namespace of each type, containing all the processes on the system.
|
||||
|
||||
224
slides/containers/Training_Environment_And_Tmux.md
Normal file
@@ -0,0 +1,224 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Our training environment
|
||||
|
||||

|
||||
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Connecting to your Virtual Machine
|
||||
|
||||
You need an SSH client.
|
||||
|
||||
* On OS X, Linux, and other UNIX systems, just use `ssh`:
|
||||
|
||||
```bash
|
||||
$ ssh <login>@<ip-address>
|
||||
```
|
||||
|
||||
* On Windows, if you don't have an SSH client, you can download:
|
||||
|
||||
* Putty (www.putty.org)
|
||||
|
||||
* Git BASH (https://git-for-windows.github.io/)
|
||||
|
||||
* MobaXterm (https://mobaxterm.mobatek.net/)
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Connecting to our lab environment
|
||||
|
||||
.lab[
|
||||
|
||||
- Log into your VM with your SSH client:
|
||||
```bash
|
||||
ssh `user`@`A.B.C.D`
|
||||
```
|
||||
|
||||
(Replace `user` and `A.B.C.D` with the user and IP address provided to you)
|
||||
|
||||
|
||||
]
|
||||
|
||||
You should see a prompt looking like this:
|
||||
```
|
||||
[A.B.C.D] (...) user@node1 ~
|
||||
$
|
||||
```
|
||||
If anything goes wrong — ask for help!
|
||||
|
||||
---
|
||||
## Our Docker VM
|
||||
|
||||
About the Lab VM
|
||||
|
||||
- The VM is created just before the training.
|
||||
|
||||
- It will stay up during the whole training.
|
||||
|
||||
- It will be destroyed shortly after the training.
|
||||
|
||||
- It comes pre-loaded with Docker and some other useful tools.
|
||||
|
||||
---
|
||||
|
||||
## Why don't we run Docker locally?
|
||||
|
||||
- I can log into your VMs to help you with labs
|
||||
|
||||
- Installing docker is out of the scope of this class (lots of online docs)
|
||||
|
||||
- It's better to spend time learning containers than fiddling with the installer!
|
||||
|
||||
---
|
||||
class: in-person
|
||||
|
||||
## `tailhist`
|
||||
|
||||
- The shell history of the instructor is available online in real time
|
||||
|
||||
- Note the IP address of the instructor's virtual machine (A.B.C.D)
|
||||
|
||||
- Open http://A.B.C.D:1088 in your browser and you should see the history
|
||||
|
||||
- The history is updated in real time (using a WebSocket connection)
|
||||
|
||||
- It should be green when the WebSocket is connected
|
||||
|
||||
(if it turns red, reloading the page should fix it)
|
||||
|
||||
- If you want to play with it on your lab machine, tailhist is installed
|
||||
|
||||
- sudo apt install firewalld
|
||||
- sudo firewall-cmd --add-port=1088/tcp
|
||||
---
|
||||
|
||||
## Checking your Virtual Machine
|
||||
|
||||
Once logged in, make sure that you can run a basic Docker command:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker version
|
||||
Client:
|
||||
Version: 18.03.0-ce
|
||||
API version: 1.37
|
||||
Go version: go1.9.4
|
||||
Git commit: 0520e24
|
||||
Built: Wed Mar 21 23:10:06 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: false
|
||||
Orchestrator: swarm
|
||||
|
||||
Server:
|
||||
Engine:
|
||||
Version: 18.03.0-ce
|
||||
API version: 1.37 (minimum version 1.12)
|
||||
Go version: go1.9.4
|
||||
Git commit: 0520e24
|
||||
Built: Wed Mar 21 23:08:35 2018
|
||||
OS/Arch: linux/amd64
|
||||
Experimental: false
|
||||
```
|
||||
]
|
||||
|
||||
If this doesn't work, raise your hand so that an instructor can assist you!
|
||||
|
||||
???
|
||||
|
||||
:EN:Container concepts
|
||||
:FR:Premier contact avec les conteneurs
|
||||
|
||||
:EN:- What's a container engine?
|
||||
:FR:- Qu'est-ce qu'un *container engine* ?
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Doing or re-doing the workshop on your own?
|
||||
|
||||
- Use something like
|
||||
[Play-With-Docker](http://play-with-docker.com/) or
|
||||
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
|
||||
|
||||
Zero setup effort; but environment are short-lived and
|
||||
might have limited resources
|
||||
|
||||
- Create your own cluster (local or cloud VMs)
|
||||
|
||||
Small setup effort; small cost; flexible environments
|
||||
|
||||
- Create a bunch of clusters for you and your friends
|
||||
([instructions](https://@@GITREPO@@/tree/master/prepare-vms))
|
||||
|
||||
Bigger setup effort; ideal for group training
|
||||
|
||||
---
|
||||
|
||||
class: self-paced
|
||||
|
||||
## Get your own Docker nodes
|
||||
|
||||
- If you already have some Docker nodes: great!
|
||||
|
||||
- If not: let's get some thanks to Play-With-Docker
|
||||
|
||||
.lab[
|
||||
|
||||
- Go to http://www.play-with-docker.com/
|
||||
|
||||
- Log in
|
||||
|
||||
- Create your first node
|
||||
|
||||
<!-- ```open http://www.play-with-docker.com/``` -->
|
||||
|
||||
]
|
||||
|
||||
You will need a Docker ID to use Play-With-Docker.
|
||||
|
||||
(Creating a Docker ID is free.)
|
||||
|
||||
---
|
||||
|
||||
## Terminals
|
||||
|
||||
Once in a while, the instructions will say:
|
||||
<br/>"Open a new terminal."
|
||||
|
||||
There are multiple ways to do this:
|
||||
|
||||
- create a new window or tab on your machine, and SSH into the VM;
|
||||
|
||||
- use screen or tmux on the VM and open a new window from there.
|
||||
|
||||
You are welcome to use the method that you feel the most comfortable with.
|
||||
|
||||
---
|
||||
|
||||
## Tmux cheat sheet
|
||||
|
||||
[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.
|
||||
|
||||
*You don't have to use it or even know about it to follow along.
|
||||
<br/>
|
||||
But some of us like to use it to switch between terminals.
|
||||
<br/>
|
||||
It has been preinstalled on your workshop nodes.*
|
||||
|
||||
- Ctrl-b c → creates a new window
|
||||
- Ctrl-b n → go to next window
|
||||
- Ctrl-b p → go to previous window
|
||||
- Ctrl-b " → split window top/bottom
|
||||
- Ctrl-b % → split window left/right
|
||||
- Ctrl-b Alt-1 → rearrange windows in columns
|
||||
- Ctrl-b Alt-2 → rearrange windows in rows
|
||||
- Ctrl-b arrows → navigate to other windows
|
||||
- Ctrl-b d → detach session
|
||||
- tmux attach → re-attach to session
|
||||
27
slides/containers/Understanding_Images.md
Normal file
@@ -0,0 +1,27 @@
|
||||
|
||||
|
||||
```bash
|
||||
$ docker run -it debian
|
||||
root@ef22f9437171:/# apt-get update
|
||||
|
||||
root@ef22f9437171:/# apt-get install skopeo
|
||||
|
||||
root@ef22f9437171:/# apt-get wget curl jq
|
||||
|
||||
root@ef22f9437171:/# skopeo login docker.io -u containertraining -p testaccount
|
||||
|
||||
$ docker commit $(docker ps -lq) skop
|
||||
```
|
||||
|
||||
```bash
|
||||
root@0ab665194c4f:~# skopeo copy docker://docker.io/containertraining/test-image-0 dir:/root/test-image-0
|
||||
root@0ab665194c4f:~# cd /root/test-image-0
|
||||
root@0ab665194c4f:~# jq <manifest.json .layers[].digest
|
||||
```
|
||||
|
||||
|
||||
Stuff in Exploring-images
|
||||
image-test-0/1/2 + jpg
|
||||
|
||||
|
||||
|
||||
20
slides/containers/exploring-images/Dockerfile.test-image-0
Normal file
@@ -0,0 +1,20 @@
|
||||
FROM busybox
|
||||
|
||||
ADD verifyImageFiles.sh /
|
||||
|
||||
WORKDIR /play
|
||||
|
||||
RUN echo "== LAYER 0 ==" && \
|
||||
echo "A is for Aardvark" >A && \
|
||||
echo "B is for Beetle" >B && \
|
||||
mkdir C/ && \
|
||||
echo "A is for Cowboy Allan" >C/CA && \
|
||||
mkdir -p C/CB && \
|
||||
echo "A is for Cowboy Buffalo Alex" >C/CB/CBA && \
|
||||
echo "B is for Cowboy Buffalo Bill" >C/CB/CBB && \
|
||||
echo "Z is for Cowboy Zeke" >> C/CZ && \
|
||||
mkdir D/ && \
|
||||
echo "A is for Detective Alisha" >D/DA && \
|
||||
echo "B is for Detective Betty" >D/DB && \
|
||||
echo "E is for Elephant" >E && \
|
||||
find . >../state.layer-0
|
||||
17
slides/containers/exploring-images/Dockerfile.test-image-1
Normal file
@@ -0,0 +1,17 @@
|
||||
FROM test-image-0
|
||||
|
||||
WORKDIR /play
|
||||
|
||||
RUN echo "== LAYER 1 == Change File B, Create File C/CC, Add Dir C/CD, Remove File E, Create Dir F, Add File G, Create Empty Dir H" && \
|
||||
echo "B is for Butterfly" >B && \
|
||||
echo "C is for Cowboy Chuck">C/CC && \
|
||||
mkdir -p C/CD && \
|
||||
echo "A is for Cowboy Dandy Austin" >C/CD/CDA && \
|
||||
rm E && \
|
||||
mkdir F && \
|
||||
echo "A is for Ferret Albert" >F/FA && \
|
||||
echo "G is for Gorilla" >G && \
|
||||
mkdir H && \
|
||||
find . >../state.layer-1
|
||||
|
||||
|
||||
18
slides/containers/exploring-images/Dockerfile.test-image-2
Normal file
@@ -0,0 +1,18 @@
|
||||
FROM test-image-1
|
||||
|
||||
WORKDIR /play
|
||||
|
||||
RUN echo "== LAYER 2 == Remove File C/CA, Remove Dir G, Remove Dir D / Replace with new Dir D, Remove Dir C/CB, Remove Dir C/CB, Remove Dir F, Add File G, Remove Dir H / Create File H" && \
|
||||
rm C/CA && \
|
||||
rm -rf C/CB && \
|
||||
echo "Z is for Cowboy Zoe" >> CZ && \
|
||||
rm -rf D && \
|
||||
mkdir -p D && \
|
||||
echo "A is for Duplicitous Albatros" >D/DA && \
|
||||
rm -rf F && \
|
||||
rm G && \
|
||||
echo "G is for Geccos" >G && \
|
||||
rmdir H \
|
||||
echo "H is for Human" >H && \
|
||||
find . >../state.layer-2
|
||||
|
||||
87
slides/containers/exploring-images/testplan.sh
Normal file
@@ -0,0 +1,87 @@
|
||||
clear
|
||||
|
||||
baseDir=$(pwd)
|
||||
|
||||
rm -rf /tmp/exploringImags
|
||||
|
||||
mkdir -p /tmp/exploringImags
|
||||
|
||||
cd /tmp/exploringImags
|
||||
|
||||
|
||||
echo "== LAYER 0 =="
|
||||
|
||||
echo "A is for Aardvark" >A
|
||||
echo "B is for Beetle" >B
|
||||
|
||||
mkdir C/
|
||||
echo "A is for Cowboy Allan" >C/CA
|
||||
|
||||
mkdir -p C/CB
|
||||
echo "A is for Cowboy Buffalo Alex" >C/CB/CBA
|
||||
echo "B is for Cowboy Buffalo Bill" >C/CB/CBB
|
||||
|
||||
echo "Z is for Cowboy Zeke" >C/CZ
|
||||
|
||||
mkdir D/
|
||||
echo "A is for Detective Alisha" >D/DA
|
||||
echo "B is for Detective Betty" >D/DB
|
||||
|
||||
echo "E is for Elephant" >E
|
||||
|
||||
find . >../state.layer-0
|
||||
tree | grep -v directories | tee ../tree.layer-0
|
||||
|
||||
$baseDir/verifyImageFiles.sh 0 $(pwd)
|
||||
|
||||
|
||||
echo "== LAYER 1 == Change File B, Create File C/CC, Add Dir C/CD, Remove File E, Create Dir F, Add File G, Create Empty Dir H"
|
||||
|
||||
echo "B is for Butterfly" >B
|
||||
|
||||
echo "C is for Cowboy Chuck">C/CC
|
||||
|
||||
mkdir -p C/CD
|
||||
echo "A is for Cowboy Dandy Austin" >C/CD/CDA
|
||||
|
||||
rm E
|
||||
|
||||
mkdir F
|
||||
echo "A is for Ferret Albert" >F/FA
|
||||
|
||||
echo "G is for Gorilla" >G
|
||||
|
||||
mkdir H
|
||||
|
||||
find . >../state.layer-1
|
||||
tree | grep -v directories | tee ../tree.layer-1
|
||||
|
||||
$baseDir/verifyImageFiles.sh 1 $(pwd)
|
||||
|
||||
|
||||
echo "== LAYER 2 == Remove File C/CA, Remove Dir G, Remove Dir D Replace with new Dir D, Remove Dir C/CB, Remove Dir C/CB, Add File H/HA, Add File, Create Dir I"
|
||||
|
||||
rm C/CA
|
||||
|
||||
rm -rf C/CB
|
||||
|
||||
echo "Z is for Cowboy Zoe" >C/CZ
|
||||
|
||||
rm -rf D
|
||||
mkdir -p D
|
||||
echo "A is for Duplicitous Albatros" >D/DA
|
||||
|
||||
rm -rf F
|
||||
|
||||
rm -rf G
|
||||
echo "G is for Geccos" >G
|
||||
|
||||
rmdir H
|
||||
echo "H is for Human" >H
|
||||
|
||||
|
||||
find . >../state.layer-2
|
||||
tree | grep -v directories | tee ../tree.layer-2
|
||||
|
||||
$baseDir/verifyImageFiles.sh 2 $(pwd)
|
||||
|
||||
88
slides/containers/exploring-images/verifyImageFiles.sh
Normal file
@@ -0,0 +1,88 @@
|
||||
|
||||
fileContentsCompare() {
|
||||
layer=$1
|
||||
text=$2
|
||||
file=$(pwd)/$3
|
||||
|
||||
if [ -f "$file" ]; then
|
||||
|
||||
fileContents=$(cat $file)
|
||||
|
||||
if [ "$fileContents" != "$text" ]; then
|
||||
echo In Layer $layer Unexpected contents in file: $file
|
||||
echo -- Contents: $fileContents
|
||||
echo -- Expected: $text
|
||||
fi
|
||||
else
|
||||
echo Missing File $file in Layer $layer
|
||||
fi
|
||||
}
|
||||
|
||||
checkLayer() {
|
||||
layer=$1
|
||||
|
||||
find . >/tmp/state
|
||||
|
||||
|
||||
if [[ $(diff /tmp/state $targetDir/../state.layer-$layer) ]]; then
|
||||
echo Directory Structure mismatch in layer: $layer
|
||||
diff /tmp/state $targetDir/../state.layer-$layer
|
||||
fi
|
||||
|
||||
case $layer in
|
||||
0)
|
||||
fileContentsCompare $layer "A is for Aardvark" A
|
||||
fileContentsCompare $layer "B is for Beetle" B
|
||||
fileContentsCompare $layer "A is for Cowboy Allan" C/CA
|
||||
fileContentsCompare $layer "A is for Cowboy Buffalo Alex" C/CB/CBA
|
||||
fileContentsCompare $layer "B is for Cowboy Buffalo Bill" C/CB/CBB
|
||||
fileContentsCompare $layer "Z is for Cowboy Zeke" C/CZ
|
||||
fileContentsCompare $layer "A is for Detective Alisha" D/DA
|
||||
fileContentsCompare $layer "B is for Detective Betty" D/DB
|
||||
fileContentsCompare $layer "E is for Elephant" E
|
||||
;;
|
||||
|
||||
# echo "== LAYER 1 == Change File B, Create File C/CC, Add Dir C/CD, Remove File E, Create Dir F, Add File G, Create Empty Dir H"
|
||||
1)
|
||||
fileContentsCompare $layer "A is for Aardvark" A
|
||||
fileContentsCompare $layer "B is for Butterfly" B ## CHANGED FILE B
|
||||
fileContentsCompare $layer "A is for Cowboy Allan" C/CA
|
||||
fileContentsCompare $layer "A is for Cowboy Buffalo Alex" C/CB/CBA
|
||||
fileContentsCompare $layer "B is for Cowboy Buffalo Bill" C/CB/CBB
|
||||
fileContentsCompare $layer "C is for Cowboy Chuck" C/CC ## ADDED FILE C/CC
|
||||
fileContentsCompare $layer "A is for Cowboy Dandy Austin" C/CD/CDA ## ADDED DIR C/CD, ADDED FILE C/CD/CDA
|
||||
fileContentsCompare $layer "Z is for Cowboy Zeke" C/CZ
|
||||
fileContentsCompare $layer "A is for Detective Alisha" D/DA
|
||||
fileContentsCompare $layer "B is for Detective Betty" D/DB
|
||||
## REMOVED FILE E
|
||||
fileContentsCompare $layer "A is for Ferret Albert" F/FA ## ADDED DIR F, ADDED FILE F/A
|
||||
fileContentsCompare $layer "G is for Gorilla" G ## ADDED G
|
||||
## CREATED EMPTY DIR H
|
||||
;;
|
||||
|
||||
# echo "== LAYER 2 == Remove File C/CA, Remove Dir C/CB, Remove Dir C/CB, Remove Dir D Replace with new Dir D, Delete and Recreatee File G, Add File H/HA Create Dir I"
|
||||
2)
|
||||
fileContentsCompare $layer "A is for Aardvark" A
|
||||
fileContentsCompare $layer "B is for Butterfly" B
|
||||
## REMOVED FILE C/CA
|
||||
## REMOVED DIR C/CB
|
||||
fileContentsCompare $layer "C is for Cowboy Chuck" C/CC
|
||||
fileContentsCompare $layer "A is for Cowboy Dandy Austin" C/CD/CDA
|
||||
fileContentsCompare $layer "Z is for Cowboy Zoe" C/CZ ## CHANGED FILE C/CZ
|
||||
## REMOVE DIR D
|
||||
fileContentsCompare $layer "A is for Duplicitous Albatros" D/DA ## RECREATE DIR D, ADD FILE D/DA
|
||||
fileContentsCompare $layer "G is for Geccos" G ## DELETED FILE G, ADDED FILE G (Implicit CHANGED)
|
||||
fileContentsCompare $layer "H is for Human" H ## ADDED FILE H
|
||||
;;
|
||||
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
|
||||
layer=$1
|
||||
targetDir=$2
|
||||
|
||||
echo VERIFYING LAYER $layer
|
||||
|
||||
checkLayer $layer
|
||||
BIN
slides/containers/exploring-images/visualizingLayers.jpg
Normal file
|
After Width: | Height: | Size: 219 KiB |
120
slides/containers/software-deployment.md
Normal file
@@ -0,0 +1,120 @@
|
||||
# Container Based Software Deployment
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
|
||||

|
||||
BIN
slides/containers/software-deployment/slide-1.jpg
Normal file
|
After Width: | Height: | Size: 51 KiB |
BIN
slides/containers/software-deployment/slide-10.jpg
Normal file
|
After Width: | Height: | Size: 106 KiB |
BIN
slides/containers/software-deployment/slide-11.jpg
Normal file
|
After Width: | Height: | Size: 126 KiB |
BIN
slides/containers/software-deployment/slide-12.jpg
Normal file
|
After Width: | Height: | Size: 129 KiB |
BIN
slides/containers/software-deployment/slide-13.jpg
Normal file
|
After Width: | Height: | Size: 123 KiB |
BIN
slides/containers/software-deployment/slide-14.jpg
Normal file
|
After Width: | Height: | Size: 154 KiB |
BIN
slides/containers/software-deployment/slide-15.jpg
Normal file
|
After Width: | Height: | Size: 110 KiB |
BIN
slides/containers/software-deployment/slide-16.jpg
Normal file
|
After Width: | Height: | Size: 138 KiB |
BIN
slides/containers/software-deployment/slide-17.jpg
Normal file
|
After Width: | Height: | Size: 152 KiB |
BIN
slides/containers/software-deployment/slide-2.jpg
Normal file
|
After Width: | Height: | Size: 99 KiB |
BIN
slides/containers/software-deployment/slide-3.jpg
Normal file
|
After Width: | Height: | Size: 62 KiB |
BIN
slides/containers/software-deployment/slide-4.jpg
Normal file
|
After Width: | Height: | Size: 109 KiB |
BIN
slides/containers/software-deployment/slide-5.jpg
Normal file
|
After Width: | Height: | Size: 141 KiB |
BIN
slides/containers/software-deployment/slide-6.jpg
Normal file
|
After Width: | Height: | Size: 92 KiB |
BIN
slides/containers/software-deployment/slide-7.jpg
Normal file
|
After Width: | Height: | Size: 139 KiB |
BIN
slides/containers/software-deployment/slide-8.jpg
Normal file
|
After Width: | Height: | Size: 97 KiB |
BIN
slides/containers/software-deployment/slide-9.jpg
Normal file
|
After Width: | Height: | Size: 148 KiB |
46
slides/k8s/alias-and-references.md
Normal file
@@ -0,0 +1,46 @@
|
||||
|
||||
|
||||
# External References && kubectl Aliases
|
||||
|
||||
Class Slides: https://2022-09-nr1.container.training/
|
||||
|
||||
Kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
|
||||
|
||||
Kubernetes API Object and kubectl Explorers
|
||||
|
||||
- https://github.com/GerrySeidman/Kubernetes-Explorer
|
||||
|
||||
Gerry Kubernetes Storage Converence Talks
|
||||
|
||||
- Vault '20: https://www.usenix.org/conference/vault20/presentation/seidman
|
||||
- Data and Dev '21: https://www.youtube.com/watch?v=k_8rWPwJ_38
|
||||
|
||||
Gerry Seidman’s Info
|
||||
|
||||
- gerry.seidman@ardanlabs.com
|
||||
- https://www.linkedin.com/in/gerryseidman/
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Kubectl Aliases
|
||||
|
||||
```bash
|
||||
alias k='kubectl'
|
||||
alias kg='kubectl get'
|
||||
alias kl='kubectl logs'
|
||||
alias ka='kubectl apply -f'
|
||||
alias kd='kubectl delete'
|
||||
alias kdf='kubectl delete -f'
|
||||
alias kb='kubectl describe'
|
||||
alias kex='kubectl explain'
|
||||
alias kx='kubectl expose'
|
||||
alias kr='kubectl run'
|
||||
alias ke='kubectl edit'
|
||||
```
|
||||
Note the below is only because of a quirk in how the lab VMs were installed:
|
||||
```bash
|
||||
echo 'kubectl exec -it $1 -- /bin/sh' >kx
|
||||
chmod +x kx
|
||||
sudo mv kx /usr/local/bin/kx
|
||||
```
|
||||
370
slides/k8s/concepts-k8s-arch.md
Normal file
@@ -0,0 +1,370 @@
|
||||
# Kubernetes Architecture
|
||||
|
||||
- The Kubernetes Architecture is minimal
|
||||
|
||||
- Kubernetes runs in Kubernetes (for the most part)
|
||||
|
||||
- Orchestration is done by a collection of Software Operators
|
||||
|
||||
- You can even write your own operators
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture
|
||||
|
||||
- Ha ha ha ha ha
|
||||
|
||||
- OK, I was trying to scare you, it's much simpler than that ❤️
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Kubernetes Architecture
|
||||
|
||||
- Ha ha ha ha
|
||||
|
||||
- OK, I was trying to scare you, it's much simpler than that ❤️
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI
|
||||
|
||||
(Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/))
|
||||
|
||||
- The second one is a simplified representation of a Kubernetes cluster
|
||||
|
||||
(Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e))
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the nodes
|
||||
|
||||
- The nodes executing our containers run a collection of services:
|
||||
|
||||
- a container Engine (typically Docker)
|
||||
|
||||
- kubelet (the "node agent")
|
||||
|
||||
- kube-proxy (a necessary but not sufficient network component)
|
||||
|
||||
- Nodes were formerly called "minions"
|
||||
|
||||
(You might see that word in older articles or documentation)
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the control plane
|
||||
|
||||
- The Kubernetes logic (its "brains") is a collection of services:
|
||||
|
||||
- the API server (our point of entry to everything!)
|
||||
|
||||
- core services like the scheduler and controller manager
|
||||
|
||||
- `etcd` (a highly available key/value store; the "database" of Kubernetes)
|
||||
|
||||
- Together, these services form the control plane of our cluster
|
||||
|
||||
- The control plane is also called the "master"
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane on special nodes
|
||||
|
||||
- It is common to reserve a dedicated node for the control plane
|
||||
|
||||
(Except for single-node development clusters, like when using minikube)
|
||||
|
||||
- This node is then called a "master"
|
||||
|
||||
(Yes, this is ambiguous: is the "master" a node, or the whole control plane?)
|
||||
|
||||
- Normal applications are restricted from running on this node
|
||||
|
||||
(By using a mechanism called ["taints"](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/))
|
||||
|
||||
- When high availability is required, each service of the control plane must be resilient
|
||||
|
||||
- The control plane is then replicated on multiple nodes
|
||||
|
||||
(This is sometimes called a "multi-master" setup)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane outside containers
|
||||
|
||||
- The services of the control plane can run in or out of containers
|
||||
|
||||
- For instance: since `etcd` is a critical service, some people
|
||||
deploy it directly on a dedicated cluster (without containers)
|
||||
|
||||
(This is illustrated on the first "super complicated" schema)
|
||||
|
||||
- In some hosted Kubernetes offerings (e.g. AKS, GKE, EKS), the control plane is invisible
|
||||
|
||||
(We only "see" a Kubernetes API endpoint)
|
||||
|
||||
- In that case, there is no "master node"
|
||||
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master."*
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## How many nodes should a cluster have?
|
||||
|
||||
- There is no particular constraint
|
||||
|
||||
(no need to have an odd number of nodes for quorum)
|
||||
|
||||
- A cluster can have zero node
|
||||
|
||||
(but then it won't be able to start any pods)
|
||||
|
||||
- For testing and development, having a single node is fine
|
||||
|
||||
- For production, make sure that you have extra capacity
|
||||
|
||||
(so that your workload still fits if you lose a node or a group of nodes)
|
||||
|
||||
- Kubernetes is tested with [up to 5000 nodes](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
|
||||
|
||||
(however, running a cluster of that size requires a lot of tuning)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
No!
|
||||
|
||||
--
|
||||
|
||||
- By default, Kubernetes uses the Docker Engine to run containers
|
||||
|
||||
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
|
||||
|
||||
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Some runtimes available through CRI
|
||||
|
||||
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
|
||||
|
||||
- maintained by Docker, IBM, and community
|
||||
- used by Docker Engine, microk8s, k3s, GKE; also standalone
|
||||
- comes with its own CLI, `ctr`
|
||||
|
||||
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
|
||||
|
||||
- maintained by Red Hat, SUSE, and community
|
||||
- used by OpenShift and Kubic
|
||||
- designed specifically as a minimal runtime for Kubernetes
|
||||
|
||||
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
Yes!
|
||||
|
||||
--
|
||||
|
||||
- In this workshop, we run our app on a single node first
|
||||
|
||||
- We will need to build images and ship them around
|
||||
|
||||
- We can do these things without Docker
|
||||
<br/>
|
||||
(and get diagnosed with NIH¹ syndrome)
|
||||
|
||||
- Docker is still the most stable container engine today
|
||||
<br/>
|
||||
(but other options are maturing very quickly)
|
||||
|
||||
.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
- On our development environments, CI pipelines ... :
|
||||
|
||||
*Yes, almost certainly*
|
||||
|
||||
- On our production servers:
|
||||
|
||||
*Yes (today)*
|
||||
|
||||
*Probably not (in the future)*
|
||||
|
||||
.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]
|
||||
|
||||
---
|
||||
|
||||
## Interacting with Kubernetes
|
||||
|
||||
- We will interact with our Kubernetes cluster through the Kubernetes API
|
||||
|
||||
- The Kubernetes API is (mostly) RESTful
|
||||
|
||||
- It allows us to create, read, update, delete *resources*
|
||||
|
||||
- A few common resource types are:
|
||||
|
||||
- node (a machine — physical or virtual — in our cluster)
|
||||
|
||||
- pod (group of containers running together on a node)
|
||||
|
||||
- service (stable network endpoint to connect to one or multiple containers)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Scaling
|
||||
|
||||
- How would we scale the pod shown on the previous slide?
|
||||
|
||||
- **Do** create additional pods
|
||||
|
||||
- each pod can be on a different node
|
||||
|
||||
- each pod will have its own IP address
|
||||
|
||||
- **Do not** add more NGINX containers in the pod
|
||||
|
||||
- all the NGINX containers would be on the same node
|
||||
|
||||
- they would all have the same IP address
|
||||
<br/>(resulting in `Address alreading in use` errors)
|
||||
|
||||
---
|
||||
|
||||
## Together or separate
|
||||
|
||||
- Should we put e.g. a web application server and a cache together?
|
||||
<br/>
|
||||
("cache" being something like e.g. Memcached or Redis)
|
||||
|
||||
- Putting them **in the same pod** means:
|
||||
|
||||
- they have to be scaled together
|
||||
|
||||
- they can communicate very efficiently over `localhost`
|
||||
|
||||
- Putting them **in different pods** means:
|
||||
|
||||
- they can be scaled separately
|
||||
|
||||
- they must communicate over remote IP addresses
|
||||
<br/>(incurring more latency, lower performance)
|
||||
|
||||
- Both scenarios can make sense, depending on our goals
|
||||
|
||||
---
|
||||
|
||||
## Credits
|
||||
|
||||
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
|
||||
|
||||
- it's one of the best Kubernetes architecture diagrams available!
|
||||
|
||||
- The second diagram is courtesy of Weave Works
|
||||
|
||||
- a *pod* can have multiple containers working together
|
||||
|
||||
- IP addresses are associated with *pods*, not with individual containers
|
||||
|
||||
Both diagrams used with permission.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes concepts
|
||||
:FR:- Kubernetes en théorie
|
||||
101
slides/k8s/concepts-k8s-intro.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# Kubernetes concepts
|
||||
|
||||
- Kubernetes is a container management system
|
||||
|
||||
- It runs and manages containerized applications on a cluster
|
||||
|
||||
--
|
||||
|
||||
- What does that really mean?
|
||||
|
||||
---
|
||||
|
||||
## What can we do with Kubernetes?
|
||||
|
||||
- Let's imagine that we have a 3-tier e-commerce app:
|
||||
|
||||
- web frontend
|
||||
|
||||
- API backend
|
||||
|
||||
- database (that we will keep out of Kubernetes for now)
|
||||
|
||||
- We have built images for our frontend and backend components
|
||||
|
||||
(e.g. with Dockerfiles and `docker build`)
|
||||
|
||||
- We are running them successfully with a local environment
|
||||
|
||||
(e.g. with Docker Compose)
|
||||
|
||||
- Let's see how we would deploy our app on Kubernetes!
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Basic things we can ask Kubernetes to do
|
||||
|
||||
--
|
||||
|
||||
- Start 5 containers using image `atseashop/api:v1.3`
|
||||
|
||||
--
|
||||
|
||||
- Place an internal load balancer in front of these containers
|
||||
|
||||
--
|
||||
|
||||
- Start 10 containers using image `atseashop/webfront:v1.3`
|
||||
|
||||
--
|
||||
|
||||
- Place a public load balancer in front of these containers
|
||||
|
||||
--
|
||||
|
||||
- It's Black Friday (or Christmas), traffic spikes, grow our cluster and add containers
|
||||
|
||||
--
|
||||
|
||||
- New release! Replace my containers with the new image `atseashop/webfront:v1.4`
|
||||
|
||||
--
|
||||
|
||||
- Keep processing requests during the upgrade; update my containers one at a time
|
||||
|
||||
---
|
||||
|
||||
## Other things that Kubernetes can do for us
|
||||
|
||||
- Autoscaling
|
||||
|
||||
(straightforward on CPU; more complex on other metrics)
|
||||
|
||||
- Resource management and scheduling
|
||||
|
||||
(reserve CPU/RAM for containers; placement constraints)
|
||||
|
||||
- Advanced rollout patterns
|
||||
|
||||
(blue/green deployment, canary deployment)
|
||||
|
||||
---
|
||||
|
||||
## More things that Kubernetes can do for us
|
||||
|
||||
- Batch jobs
|
||||
|
||||
(one-off; parallel; also cron-style periodic execution)
|
||||
|
||||
- Fine-grained access control
|
||||
|
||||
(defining *what* can be done by *whom* on *which* resources)
|
||||
|
||||
- Stateful services
|
||||
|
||||
(databases, message queues, etc.)
|
||||
|
||||
- Automating complex tasks with *operators*
|
||||
|
||||
(e.g. database replication, failover, etc.)
|
||||
|
||||
@@ -316,6 +316,7 @@ class: extra-details
|
||||
## How to find charts, the new way
|
||||
|
||||
- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io)
|
||||
https://artifacthub.io/packages/helm/securecodebox/juice-shop
|
||||
|
||||
- Or use `helm search hub ...` from the CLI
|
||||
|
||||
@@ -343,7 +344,8 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
Then go to → https://artifacthub.io/packages/helm/seccurecodebox/juice-shop
|
||||
Then go to → https://artifacthub.io/packages/helm/securecodebox/juice-shop
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
51
slides/k8s/kubectl-commands.md
Normal file
@@ -0,0 +1,51 @@
|
||||
## Basic Commands
|
||||
run
|
||||
create
|
||||
get
|
||||
delete
|
||||
logs
|
||||
explain
|
||||
describe
|
||||
exec
|
||||
## Modifying Objects
|
||||
apply (upsert)
|
||||
set
|
||||
edit
|
||||
patch
|
||||
label
|
||||
annotate
|
||||
https://blog.atomist.com/kubernetes-apply-replace-patch/
|
||||
diff
|
||||
replace
|
||||
wait
|
||||
## NetCommands
|
||||
expose
|
||||
port-forward
|
||||
proxy
|
||||
## Deploy Command
|
||||
rollout
|
||||
scale
|
||||
autoscale
|
||||
## Cluster Management Commands
|
||||
certificate
|
||||
cluster-info
|
||||
cordon
|
||||
uncordon
|
||||
drain
|
||||
taint
|
||||
## Troubleshooting and Debugging Commands
|
||||
top
|
||||
attach
|
||||
cp
|
||||
auth
|
||||
debug
|
||||
## Settings Commands
|
||||
completion
|
||||
## Other Commands
|
||||
alpha
|
||||
api-resources
|
||||
api-versions
|
||||
config
|
||||
plugin
|
||||
version
|
||||
Please Share this API Explorer
|
||||
269
slides/k8s/kubectl-first.md
Normal file
@@ -0,0 +1,269 @@
|
||||
# First contact with `kubectl`
|
||||
|
||||
- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes
|
||||
|
||||
- It is a rich CLI tool around the Kubernetes API
|
||||
|
||||
(Everything you can do with `kubectl`, you can do directly with the API)
|
||||
|
||||
- On our machines, there is a `~/.kube/config` file with:
|
||||
|
||||
- the Kubernetes API address
|
||||
|
||||
- the path to our TLS certificates used to authenticate
|
||||
|
||||
- You can also use the `--kubeconfig` flag to pass a config file
|
||||
|
||||
- Or directly `--server`, `--user`, etc.
|
||||
|
||||
- `kubectl` can be pronounced "Cube C T L", "Cube cuttle", "Cube cuddle"...
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl` is the new SSH
|
||||
|
||||
- We often start managing servers with SSH
|
||||
|
||||
(installing packages, troubleshooting ...)
|
||||
|
||||
- At scale, it becomes tedious, repetitive, error-prone
|
||||
|
||||
- Instead, we use config management, central logging, etc.
|
||||
|
||||
- In many cases, we still need SSH:
|
||||
|
||||
- as the underlying access method (e.g. Ansible)
|
||||
|
||||
- to debug tricky scenarios
|
||||
|
||||
- to inspect and poke at things
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The parallel with `kubectl`
|
||||
|
||||
- We often start managing Kubernetes clusters with `kubectl`
|
||||
|
||||
(deploying applications, troubleshooting ...)
|
||||
|
||||
- At scale (with many applications or clusters), it becomes tedious, repetitive, error-prone
|
||||
|
||||
- Instead, we use automated pipelines, observability tooling, etc.
|
||||
|
||||
- In many cases, we still need `kubectl`:
|
||||
|
||||
- to debug tricky scenarios
|
||||
|
||||
- to inspect and poke at things
|
||||
|
||||
- The Kubernetes API is always the underlying access method
|
||||
|
||||
---
|
||||
|
||||
## `kubectl get`
|
||||
|
||||
- Let's look at our `Node` resources with `kubectl get`!
|
||||
|
||||
.lab[
|
||||
|
||||
- Look at the composition of our cluster:
|
||||
```bash
|
||||
kubectl get node
|
||||
```
|
||||
|
||||
- These commands are equivalent:
|
||||
```bash
|
||||
kubectl get no
|
||||
kubectl get node
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## kubectl is an API Server Client
|
||||
|
||||
- kubectl verbose (-v)
|
||||
|
||||
- --v=6 Display requested resources.
|
||||
|
||||
- --v=7 Display HTTP request headers.
|
||||
|
||||
- --v=8 Display HTTP request contents.
|
||||
|
||||
- --v=9 Display HTTP request contents without truncation of contents.
|
||||
|
||||
```bash
|
||||
kubectl get nodes --v=8
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Obtaining machine-readable output
|
||||
|
||||
- `kubectl get` can output JSON, YAML, or be directly formatted
|
||||
|
||||
.lab[
|
||||
|
||||
- Give us more info about the nodes:
|
||||
```bash
|
||||
kubectl get nodes -o wide
|
||||
```
|
||||
|
||||
- Let's have some YAML:
|
||||
```bash
|
||||
kubectl get no -o yaml
|
||||
```
|
||||
See that `kind: List` at the end? It's the type of our result!
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## (Ab)using `kubectl` and `jq`
|
||||
|
||||
- It's super easy to build custom reports
|
||||
|
||||
.lab[
|
||||
|
||||
- Show the capacity of all our nodes as a stream of JSON objects:
|
||||
```bash
|
||||
kubectl get nodes -o json |
|
||||
jq ".items[] | {name:.metadata.name} + .status.capacity"
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Exploring types and definitions
|
||||
|
||||
- We can list all available resource types by running `kubectl api-resources`
|
||||
<br/>
|
||||
(In Kubernetes 1.10 and prior, this command used to be `kubectl get`)
|
||||
|
||||
- We can view the definition for a resource type with:
|
||||
```bash
|
||||
kubectl explain type
|
||||
```
|
||||
|
||||
- We can view the definition of a field in a resource, for instance:
|
||||
```bash
|
||||
kubectl explain node.spec
|
||||
```
|
||||
|
||||
- Or get the full definition of all fields and sub-fields:
|
||||
```bash
|
||||
kubectl explain node --recursive
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Introspection vs. documentation
|
||||
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)
|
||||
|
||||
- The API documentation is usually easier to read, but:
|
||||
|
||||
- it won't show custom types (like Custom Resource Definitions)
|
||||
|
||||
- we need to make sure that we look at the correct version
|
||||
|
||||
- `kubectl api-resources` and `kubectl explain` perform *introspection*
|
||||
|
||||
(they communicate with the API server and obtain the exact type definitions)
|
||||
|
||||
---
|
||||
|
||||
## Type names
|
||||
|
||||
- The most common resource names have three forms:
|
||||
|
||||
- singular (e.g. `node`, `service`, `deployment`)
|
||||
|
||||
- plural (e.g. `nodes`, `services`, `deployments`)
|
||||
|
||||
- short (e.g. `no`, `svc`, `deploy`)
|
||||
|
||||
- Some resources do not have a short name
|
||||
|
||||
- `Endpoints` only have a plural form
|
||||
|
||||
(because even a single `Endpoints` resource is actually a list of endpoints)
|
||||
|
||||
---
|
||||
|
||||
## Viewing details
|
||||
|
||||
- We can use `kubectl get -o yaml` to see all available details
|
||||
|
||||
- However, YAML output is often simultaneously too much and not enough
|
||||
|
||||
- For instance, `kubectl get node node1 -o yaml` is:
|
||||
|
||||
- too much information (e.g.: list of images available on this node)
|
||||
|
||||
- not enough information (e.g.: doesn't show pods running on this node)
|
||||
|
||||
- difficult to read for a human operator
|
||||
|
||||
- For a comprehensive overview, we can use `kubectl describe` instead
|
||||
|
||||
---
|
||||
|
||||
## `kubectl describe`
|
||||
|
||||
- `kubectl describe` needs a resource type and (optionally) a resource name
|
||||
|
||||
- It is possible to provide a resource name *prefix*
|
||||
|
||||
(all matching objects will be displayed)
|
||||
|
||||
- `kubectl describe` will retrieve some extra information about the resource
|
||||
|
||||
.lab[
|
||||
|
||||
- Look at the information available for `node1` with one of the following commands:
|
||||
```bash
|
||||
kubectl describe node/node1
|
||||
kubectl describe node node1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(We should notice a bunch of control plane pods.)
|
||||
|
||||
---
|
||||
|
||||
## Listing running containers
|
||||
|
||||
- Containers are manipulated through *pods*
|
||||
|
||||
- A pod is a group of containers:
|
||||
|
||||
- running together (on the same node)
|
||||
|
||||
- sharing resources (RAM, CPU; but also network, volumes)
|
||||
|
||||
.lab[
|
||||
|
||||
- List pods on our cluster:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
*Where are the pods that we saw just a moment earlier?!?*
|
||||
|
||||
340
slides/k8s/kubectl-more.md
Normal file
@@ -0,0 +1,340 @@
|
||||
# More contact with `kubectl`
|
||||
|
||||
- Namespaces
|
||||
- Clusters
|
||||
- Proxy
|
||||
|
||||
---
|
||||
|
||||
## Namespaces
|
||||
|
||||
- Namespaces allow us to segregate resources
|
||||
|
||||
.lab[
|
||||
|
||||
- List the namespaces on our cluster with one of these commands:
|
||||
```bash
|
||||
kubectl get namespaces
|
||||
kubectl get namespace
|
||||
kubectl get ns
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
*You know what ... This `kube-system` thing looks suspicious.*
|
||||
|
||||
*In fact, I'm pretty sure it showed up earlier, when we did:*
|
||||
|
||||
`kubectl describe node node1`
|
||||
|
||||
---
|
||||
|
||||
## Accessing namespaces
|
||||
|
||||
- By default, `kubectl` uses the `default` namespace
|
||||
|
||||
- We can see resources in all namespaces with `--all-namespaces`
|
||||
|
||||
.lab[
|
||||
|
||||
- List the pods in all namespaces:
|
||||
```bash
|
||||
kubectl get pods --all-namespaces
|
||||
```
|
||||
|
||||
- Since Kubernetes 1.14, we can also use `-A` as a shorter version:
|
||||
```bash
|
||||
kubectl get pods -A
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
*Here are our system pods!*
|
||||
|
||||
---
|
||||
|
||||
## What are all these control plane pods?
|
||||
|
||||
- `etcd` is our etcd server
|
||||
|
||||
- `kube-apiserver` is the API server
|
||||
|
||||
- `kube-controller-manager` and `kube-scheduler` are other control plane components
|
||||
|
||||
- `coredns` provides DNS-based service discovery ([replacing kube-dns as of 1.11](https://kubernetes.io/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/))
|
||||
|
||||
- `kube-proxy` is the (per-node) component managing port mappings and such
|
||||
|
||||
- `weave` is the (per-node) component managing the network overlay
|
||||
|
||||
- the `READY` column indicates the number of containers in each pod
|
||||
|
||||
(1 for most pods, but `weave` has 2, for instance)
|
||||
|
||||
---
|
||||
|
||||
## Scoping another namespace
|
||||
|
||||
- We can also look at a different namespace (other than `default`)
|
||||
|
||||
.lab[
|
||||
|
||||
- List only the pods in the `kube-system` namespace:
|
||||
```bash
|
||||
kubectl get pods --namespace=kube-system
|
||||
kubectl get pods -n kube-system
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Namespaces and other `kubectl` commands
|
||||
|
||||
- We can use `-n`/`--namespace` with almost every `kubectl` command
|
||||
|
||||
- Example:
|
||||
|
||||
- `kubectl create --namespace=X` to create something in namespace X
|
||||
|
||||
- We can use `-A`/`--all-namespaces` with most commands that manipulate multiple objects
|
||||
|
||||
- Examples:
|
||||
|
||||
- `kubectl delete` can delete resources across multiple namespaces
|
||||
|
||||
- `kubectl label` can add/remove/update labels across multiple namespaces
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about `kube-public`?
|
||||
|
||||
.lab[
|
||||
|
||||
- List the pods in the `kube-public` namespace:
|
||||
```bash
|
||||
kubectl -n kube-public get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Nothing!
|
||||
|
||||
`kube-public` is created by kubeadm & [used for security bootstrapping](https://kubernetes.io/blog/2017/01/stronger-foundation-for-creating-and-managing-kubernetes-clusters).
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Exploring `kube-public`
|
||||
|
||||
- The only interesting object in `kube-public` is a ConfigMap named `cluster-info`
|
||||
|
||||
.lab[
|
||||
|
||||
- List ConfigMap objects:
|
||||
```bash
|
||||
kubectl -n kube-public get configmaps
|
||||
```
|
||||
|
||||
- Inspect `cluster-info`:
|
||||
```bash
|
||||
kubectl -n kube-public get configmap cluster-info -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note the `selfLink` URI: `/api/v1/namespaces/kube-public/configmaps/cluster-info`
|
||||
|
||||
We can use that!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Accessing `cluster-info`
|
||||
|
||||
- Earlier, when trying to access the API server, we got a `Forbidden` message
|
||||
|
||||
- But `cluster-info` is readable by everyone (even without authentication)
|
||||
|
||||
.lab[
|
||||
|
||||
- Retrieve `cluster-info`:
|
||||
```bash
|
||||
curl -k https://10.96.0.1/api/v1/namespaces/kube-public/configmaps/cluster-info
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- We were able to access `cluster-info` (without auth)
|
||||
|
||||
- It contains a `kubeconfig` file
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Retrieving `kubeconfig`
|
||||
|
||||
- We can easily extract the `kubeconfig` file from this ConfigMap
|
||||
|
||||
.lab[
|
||||
|
||||
- Display the content of `kubeconfig`:
|
||||
```bash
|
||||
curl -sk https://10.96.0.1/api/v1/namespaces/kube-public/configmaps/cluster-info \
|
||||
| jq -r .data.kubeconfig
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- This file holds the canonical address of the API server, and the public key of the CA
|
||||
|
||||
- This file *does not* hold client keys or tokens
|
||||
|
||||
- This is not sensitive information, but allows us to establish trust
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What about `kube-node-lease`?
|
||||
|
||||
- Starting with Kubernetes 1.14, there is a `kube-node-lease` namespace
|
||||
|
||||
(or in Kubernetes 1.13 if the NodeLease feature gate is enabled)
|
||||
|
||||
- That namespace contains one Lease object per node
|
||||
|
||||
- *Node leases* are a new way to implement node heartbeats
|
||||
|
||||
(i.e. node regularly pinging the control plane to say "I'm alive!")
|
||||
|
||||
- For more details, see [Efficient Node Heartbeats KEP] or the [node controller documentation]
|
||||
|
||||
[Efficient Node Heartbeats KEP]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/589-efficient-node-heartbeats/README.md
|
||||
[node controller documentation]: https://kubernetes.io/docs/concepts/architecture/nodes/#node-controller
|
||||
|
||||
---
|
||||
|
||||
## Services
|
||||
|
||||
- A *service* is a stable endpoint to connect to "something"
|
||||
|
||||
(In the initial proposal, they were called "portals")
|
||||
|
||||
.lab[
|
||||
|
||||
- List the services on our cluster with one of these commands:
|
||||
```bash
|
||||
kubectl get services
|
||||
kubectl get svc
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
There is already one service on our cluster: the Kubernetes API itself.
|
||||
|
||||
---
|
||||
|
||||
## ClusterIP services
|
||||
|
||||
- A `ClusterIP` service is internal, available from the cluster only
|
||||
|
||||
- This is useful for introspection from within containers
|
||||
|
||||
.lab[
|
||||
|
||||
- Try to connect to the API:
|
||||
```bash
|
||||
curl -k https://10.96.0.1
|
||||
```
|
||||
|
||||
- `-k` is used to skip certificate verification
|
||||
|
||||
- Make sure to replace 10.96.0.1 with the CLUSTER-IP shown by `kubectl get svc`
|
||||
|
||||
]
|
||||
|
||||
The command above should either time out, or show an authentication error. Why?
|
||||
|
||||
---
|
||||
|
||||
## Time out
|
||||
|
||||
- Connections to ClusterIP services only work *from within the cluster*
|
||||
|
||||
- If we are outside the cluster, the `curl` command will probably time out
|
||||
|
||||
(Because the IP address, e.g. 10.96.0.1, isn't routed properly outside the cluster)
|
||||
|
||||
- This is the case with most "real" Kubernetes clusters
|
||||
|
||||
- To try the connection from within the cluster, we can use [shpod](https://github.com/jpetazzo/shpod)
|
||||
|
||||
---
|
||||
|
||||
## Authentication error
|
||||
|
||||
This is what we should see when connecting from within the cluster:
|
||||
```json
|
||||
$ curl -k https://10.96.0.1
|
||||
{
|
||||
"kind": "Status",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
|
||||
},
|
||||
"status": "Failure",
|
||||
"message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
|
||||
"reason": "Forbidden",
|
||||
"details": {
|
||||
|
||||
},
|
||||
"code": 403
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- We can see `kind`, `apiVersion`, `metadata`
|
||||
|
||||
- These are typical of a Kubernetes API reply
|
||||
|
||||
- Because we *are* talking to the Kubernetes API
|
||||
|
||||
- The Kubernetes API tells us "Forbidden"
|
||||
|
||||
(because it requires authentication)
|
||||
|
||||
- The Kubernetes API is reachable from within the cluster
|
||||
|
||||
(many apps integrating with Kubernetes will use this)
|
||||
|
||||
---
|
||||
|
||||
## DNS integration
|
||||
|
||||
- Each service also gets a DNS record
|
||||
|
||||
- The Kubernetes DNS resolver is available *from within pods*
|
||||
|
||||
(and sometimes, from within nodes, depending on configuration)
|
||||
|
||||
- Code running in pods can connect to services using their name
|
||||
|
||||
(e.g. https://kubernetes/...)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Getting started with kubectl
|
||||
:FR:- Se familiariser avec kubectl
|
||||
399
slides/k8s/kubectl-run-deployment.md
Normal file
@@ -0,0 +1,399 @@
|
||||
|
||||
# Scaling our application
|
||||
|
||||
- `kubectl` gives us a simple command to scale a workload:
|
||||
|
||||
`kubectl scale TYPE NAME --replicas=HOWMANY`
|
||||
|
||||
- Let's try it on our Pod, so that we have more Pods!
|
||||
|
||||
.lab[
|
||||
|
||||
- Try to scale the Pod:
|
||||
```bash
|
||||
kubectl scale pod pingpong --replicas=3
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
🤔 We get the following error, what does that mean?
|
||||
|
||||
```
|
||||
Error from server (NotFound): the server could not find the requested resource
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scaling a Pod
|
||||
|
||||
- We cannot "scale a Pod"
|
||||
|
||||
(that's not completely true; we could give it more CPU/RAM)
|
||||
|
||||
- If we want more Pods, we need to create more Pods
|
||||
|
||||
(i.e. execute `kubectl run` multiple times)
|
||||
|
||||
- There must be a better way!
|
||||
|
||||
(spoiler alert: yes, there is a better way!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `NotFound`
|
||||
|
||||
- What's the meaning of that error?
|
||||
```
|
||||
Error from server (NotFound): the server could not find the requested resource
|
||||
```
|
||||
|
||||
- When we execute `kubectl scale THAT-RESOURCE --replicas=THAT-MANY`,
|
||||
<br/>
|
||||
it is like telling Kubernetes:
|
||||
|
||||
*go to THAT-RESOURCE and set the scaling button to position THAT-MANY*
|
||||
|
||||
- Pods do not have a "scaling button"
|
||||
|
||||
- Try to execute the `kubectl scale pod` command with `-v6`
|
||||
|
||||
- We see a `PATCH` request to `/scale`: that's the "scaling button"
|
||||
|
||||
(technically it's called a *subresource* of the Pod)
|
||||
|
||||
---
|
||||
|
||||
## Creating more pods
|
||||
|
||||
- We are going to create a ReplicaSet
|
||||
|
||||
(= set of replicas = set of identical pods)
|
||||
|
||||
- In fact, we will create a Deployment, which itself will create a ReplicaSet
|
||||
|
||||
- Why so many layers? We'll explain that shortly, don't worry!
|
||||
|
||||
---
|
||||
|
||||
## Creating a Deployment running `ping`
|
||||
|
||||
- Let's create a Deployment instead of a single Pod
|
||||
|
||||
.lab[
|
||||
|
||||
- Create the Deployment; pay attention to the `--`:
|
||||
```bash
|
||||
kubectl create deployment pingpong --image=alpine -- ping 127.0.0.1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The `--` is used to separate:
|
||||
|
||||
- options/flags of `kubectl create`
|
||||
|
||||
- command to run in the container
|
||||
|
||||
---
|
||||
|
||||
## What has been created?
|
||||
|
||||
.lab[
|
||||
|
||||
<!-- ```hide kubectl wait pod --selector=app=pingpong --for condition=ready ``` -->
|
||||
|
||||
- Check the resources that were created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: `kubectl get all` is a lie. It doesn't show everything.
|
||||
|
||||
(But it shows a lot of "usual suspects", i.e. commonly used resources.)
|
||||
|
||||
---
|
||||
|
||||
## There's a lot going on here!
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
pod/pingpong 1/1 Running 0 4m17s
|
||||
pod/pingpong-6ccbc77f68-kmgfn 1/1 Running 0 11s
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h45
|
||||
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/pingpong 1/1 1 1 11s
|
||||
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
replicaset.apps/pingpong-6ccbc77f68 1 1 1 11s
|
||||
```
|
||||
|
||||
Our new Pod is not named `pingpong`, but `pingpong-xxxxxxxxxxx-yyyyy`.
|
||||
|
||||
We have a Deployment named `pingpong`, and an extra ReplicaSet, too. What's going on?
|
||||
|
||||
---
|
||||
|
||||
## From Deployment to Pod
|
||||
|
||||
We have the following resources:
|
||||
|
||||
- `deployment.apps/pingpong`
|
||||
|
||||
This is the Deployment that we just created.
|
||||
|
||||
- `replicaset.apps/pingpong-xxxxxxxxxx`
|
||||
|
||||
This is a Replica Set created by this Deployment.
|
||||
|
||||
- `pod/pingpong-xxxxxxxxxx-yyyyy`
|
||||
|
||||
This is a *pod* created by the Replica Set.
|
||||
|
||||
Let's explain what these things are.
|
||||
|
||||
---
|
||||
|
||||
## Pod
|
||||
|
||||
- Can have one or multiple containers
|
||||
|
||||
- Runs on a single node
|
||||
|
||||
(Pod cannot "straddle" multiple nodes)
|
||||
|
||||
- Pods cannot be moved
|
||||
|
||||
(e.g. in case of node outage)
|
||||
|
||||
- Pods cannot be scaled horizontally
|
||||
|
||||
(except by manually creating more Pods)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Pod details
|
||||
|
||||
- A Pod is not a process; it's an environment for containers
|
||||
|
||||
- it cannot be "restarted"
|
||||
|
||||
- it cannot "crash"
|
||||
|
||||
- The containers in a Pod can crash
|
||||
|
||||
- They may or may not get restarted
|
||||
|
||||
(depending on Pod's restart policy)
|
||||
|
||||
- If all containers exit successfully, the Pod ends in "Succeeded" phase
|
||||
|
||||
- If some containers fail and don't get restarted, the Pod ends in "Failed" phase
|
||||
|
||||
---
|
||||
|
||||
## Replica Set
|
||||
|
||||
- Set of identical (replicated) Pods
|
||||
|
||||
- Defined by a pod template + number of desired replicas
|
||||
|
||||
- If there are not enough Pods, the Replica Set creates more
|
||||
|
||||
(e.g. in case of node outage; or simply when scaling up)
|
||||
|
||||
- If there are too many Pods, the Replica Set deletes some
|
||||
|
||||
(e.g. if a node was disconnected and comes back; or when scaling down)
|
||||
|
||||
- We can scale up/down a Replica Set
|
||||
|
||||
- we update the manifest of the Replica Set
|
||||
|
||||
- as a consequence, the Replica Set controller creates/deletes Pods
|
||||
|
||||
---
|
||||
|
||||
## Deployment
|
||||
|
||||
- Replica Sets control *identical* Pods
|
||||
|
||||
- Deployments are used to roll out different Pods
|
||||
|
||||
(different image, command, environment variables, ...)
|
||||
|
||||
- When we update a Deployment with a new Pod definition:
|
||||
|
||||
- a new Replica Set is created with the new Pod definition
|
||||
|
||||
- that new Replica Set is progressively scaled up
|
||||
|
||||
- meanwhile, the old Replica Set(s) is(are) scaled down
|
||||
|
||||
- This is a *rolling update*, minimizing application downtime
|
||||
|
||||
- When we scale up/down a Deployment, it scales up/down its Replica Set
|
||||
|
||||
---
|
||||
|
||||
## Can we scale now?
|
||||
|
||||
- Let's try `kubectl scale` again, but on the Deployment!
|
||||
|
||||
.lab[
|
||||
|
||||
- Scale our `pingpong` deployment:
|
||||
```bash
|
||||
kubectl scale deployment pingpong --replicas 3
|
||||
```
|
||||
|
||||
- Note that we could also write it like this:
|
||||
```bash
|
||||
kubectl scale deployment/pingpong --replicas 3
|
||||
```
|
||||
|
||||
- Check that we now have multiple pods:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Scaling a Replica Set
|
||||
|
||||
- What if we scale the Replica Set instead of the Deployment?
|
||||
|
||||
- The Deployment would notice it right away and scale back to the initial level
|
||||
|
||||
- The Replica Set makes sure that we have the right numbers of Pods
|
||||
|
||||
- The Deployment makes sure that the Replica Set has the right size
|
||||
|
||||
(conceptually, it delegates the management of the Pods to the Replica Set)
|
||||
|
||||
- This might seem weird (why this extra layer?) but will soon make sense
|
||||
|
||||
(when we will look at how rolling updates work!)
|
||||
|
||||
---
|
||||
|
||||
## Checking Deployment logs
|
||||
|
||||
- `kubectl logs` needs a Pod name
|
||||
|
||||
- But it can also work with a *type/name*
|
||||
|
||||
(e.g. `deployment/pingpong`)
|
||||
|
||||
.lab[
|
||||
|
||||
- View the result of our `ping` command:
|
||||
```bash
|
||||
kubectl logs deploy/pingpong --tail 2
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- It shows us the logs of the first Pod of the Deployment
|
||||
|
||||
- We'll see later how to get the logs of *all* the Pods!
|
||||
|
||||
---
|
||||
|
||||
## Resilience
|
||||
|
||||
- The *deployment* `pingpong` watches its *replica set*
|
||||
|
||||
- The *replica set* ensures that the right number of *pods* are running
|
||||
|
||||
- What happens if pods disappear?
|
||||
|
||||
.lab[
|
||||
|
||||
- In a separate window, watch the list of pods:
|
||||
```bash
|
||||
watch kubectl get pods
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Every 2.0s```
|
||||
```tmux split-pane -v```
|
||||
-->
|
||||
|
||||
- Destroy the pod currently shown by `kubectl logs`:
|
||||
```
|
||||
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
|
||||
```
|
||||
|
||||
<!--
|
||||
```tmux select-pane -t 0```
|
||||
```copy pingpong-[^-]*-.....```
|
||||
```tmux last-pane```
|
||||
```keys kubectl delete pod ```
|
||||
```paste```
|
||||
```key ^J```
|
||||
```check```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What happened?
|
||||
|
||||
- `kubectl delete pod` terminates the pod gracefully
|
||||
|
||||
(sending it the TERM signal and waiting for it to shutdown)
|
||||
|
||||
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
|
||||
|
||||
- But we can still see the output of the "Terminating" pod in `kubectl logs`
|
||||
|
||||
- Until 30 seconds later, when the grace period expires
|
||||
|
||||
- The pod is then killed, and `kubectl logs` exits
|
||||
|
||||
---
|
||||
|
||||
## Deleting a standalone Pod
|
||||
|
||||
- What happens if we delete a standalone Pod?
|
||||
|
||||
(like the first `pingpong` Pod that we created)
|
||||
|
||||
.lab[
|
||||
|
||||
- Delete the Pod:
|
||||
```bash
|
||||
kubectl delete pod pingpong
|
||||
```
|
||||
|
||||
<!--
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
- No replacement Pod gets created because there is no *controller* watching it
|
||||
|
||||
- That's why we will rarely use standalone Pods in practice
|
||||
|
||||
(except for e.g. punctual debugging or executing a short supervised task)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Running pods and deployments
|
||||
:FR:- Créer un pod et un déploiement
|
||||
346
slides/k8s/kubectl-run-pod.md
Normal file
@@ -0,0 +1,346 @@
|
||||
# Running our first containers on Kubernetes
|
||||
|
||||
- First things first: we cannot run a container
|
||||
|
||||
--
|
||||
|
||||
- We are going to run a pod, and in that pod there will be a single container
|
||||
|
||||
--
|
||||
|
||||
- In that container in the pod, we are going to run a simple `ping` command
|
||||
|
||||
---
|
||||
|
||||
## Starting a simple pod with `kubectl run`
|
||||
|
||||
- `kubectl run` is convenient to start a single pod
|
||||
|
||||
- We need to specify at least a *name* and the image we want to use
|
||||
|
||||
- Optionally, we can specify the command to run in the pod
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's ping the address of `localhost`, the loopback interface:
|
||||
```bash
|
||||
kubectl run pingpong --image alpine ping 127.0.0.1
|
||||
```
|
||||
|
||||
<!-- ```hide kubectl wait pod --selector=run=pingpong --for condition=ready``` -->
|
||||
|
||||
]
|
||||
|
||||
The output tells us that a Pod was created:
|
||||
```
|
||||
pod/pingpong created
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Viewing container output
|
||||
|
||||
- Let's use the `kubectl logs` command
|
||||
|
||||
- It takes a Pod name as argument
|
||||
|
||||
- Unless specified otherwise, it will only show logs of the first container in the pod
|
||||
|
||||
(Good thing there's only one in ours!)
|
||||
|
||||
.lab[
|
||||
|
||||
- View the result of our `ping` command:
|
||||
```bash
|
||||
kubectl logs pingpong
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs in real time
|
||||
|
||||
- Just like `docker logs`, `kubectl logs` supports convenient options:
|
||||
|
||||
- `-f`/`--follow` to stream logs in real time (à la `tail -f`)
|
||||
|
||||
- `--tail` to indicate how many lines you want to see (from the end)
|
||||
|
||||
- `--since` to get logs only after a given timestamp
|
||||
|
||||
.lab[
|
||||
|
||||
- View the latest logs of our `ping` command:
|
||||
```bash
|
||||
kubectl logs pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
- Stop it with Ctrl-C
|
||||
|
||||
<!--
|
||||
```wait seq=3```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authoring YAML
|
||||
|
||||
- We have already generated YAML implicitly, with e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
- When and why do we need to write our own YAML?
|
||||
|
||||
- How do we write YAML from scratch?
|
||||
|
||||
---
|
||||
|
||||
## The limits of generated YAML
|
||||
|
||||
- Many advanced (and even not-so-advanced) features require to write YAML:
|
||||
|
||||
- pods with multiple containers
|
||||
|
||||
- resource limits
|
||||
|
||||
- healthchecks
|
||||
|
||||
- DaemonSets, StatefulSets
|
||||
|
||||
- and more!
|
||||
|
||||
- How do we access these features?
|
||||
|
||||
---
|
||||
|
||||
## Various ways to write YAML
|
||||
|
||||
- Completely from scratch with our favorite editor
|
||||
|
||||
(yeah, right)
|
||||
|
||||
- Dump an existing resource with `kubectl get -o yaml ...`
|
||||
|
||||
(it is recommended to clean up the result)
|
||||
|
||||
- Ask `kubectl` to generate the YAML
|
||||
|
||||
(with a `kubectl create --dry-run=client -o yaml`)
|
||||
|
||||
- Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML from scratch
|
||||
|
||||
- Start with a namespace:
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hello
|
||||
```
|
||||
|
||||
- We can use `kubectl explain` to see resource definitions:
|
||||
```bash
|
||||
kubectl explain -r pod.spec
|
||||
```
|
||||
|
||||
- Not the easiest option!
|
||||
|
||||
---
|
||||
|
||||
## Dump the YAML for an existing resource
|
||||
|
||||
- `kubectl get -o yaml` works!
|
||||
|
||||
- A lot of fields in `metadata` are not necessary
|
||||
|
||||
(`managedFields`, `resourceVersion`, `uid`, `creationTimestamp` ...)
|
||||
|
||||
- Most objects will have a `status` field that is not necessary
|
||||
|
||||
- Default or empty values can also be removed for clarity
|
||||
|
||||
- This can be done manually or with the `kubectl-neat` plugin
|
||||
|
||||
`kubectl get -o yaml ... | kubectl neat`
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run=client` option
|
||||
|
||||
.lab[
|
||||
|
||||
- Generate the YAML for a Deployment without creating it:
|
||||
```bash
|
||||
kubectl run pingpong --image alpine --dry-run=client ping 127.0.0.1
|
||||
|
||||
kubectl run pingpong --image alpine --dry-run=client ping 127.0.0.1 >ping.yaml
|
||||
```
|
||||
|
||||
- Optionally clean it up with `kubectl neat`, too
|
||||
|
||||
```bash
|
||||
kubectl apply -f ping.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Server-side dry run will do all the work, but *not* persist to etcd
|
||||
|
||||
(all validation and mutation hooks will be executed)
|
||||
|
||||
.lab[
|
||||
|
||||
- Try the same YAML file as earlier, with server-side dry run:
|
||||
```bash
|
||||
kubectl run pingpong --image alpine --dry-run=server ping 127.0.0.1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
|
||||
- The only step that is skipped is "write to etcd"
|
||||
|
||||
- YAML that passes server-side dry run *should* apply successfully
|
||||
|
||||
(unless the cluster state changes by the time the YAML is actually applied)
|
||||
|
||||
- Validating or mutating hooks that have side effects can also be an issue
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- `kubectl diff` does a server-side dry run, *and* shows differences
|
||||
|
||||
.lab[
|
||||
|
||||
- Try `kubectl diff` on the YAML that we tweaked earlier:
|
||||
```bash
|
||||
kubectl diff -f web.yaml
|
||||
```
|
||||
|
||||
<!-- ```wait status:``` -->
|
||||
|
||||
]
|
||||
|
||||
Note: we don't need to specify `--validate=false` here.
|
||||
|
||||
---
|
||||
|
||||
## Advantage of YAML
|
||||
|
||||
- Using YAML (instead of `kubectl create <kind>`) allows to be *declarative*
|
||||
|
||||
- The YAML describes the desired state of our cluster and applications
|
||||
|
||||
- YAML can be stored, versioned, archived (e.g. in git repositories)
|
||||
|
||||
- To change resources, change the YAML files
|
||||
|
||||
(instead of using `kubectl edit`/`scale`/`label`/etc.)
|
||||
|
||||
- Changes can be reviewed before being applied
|
||||
|
||||
(with code reviews, pull requests ...)
|
||||
|
||||
- This workflow is sometimes called "GitOps"
|
||||
|
||||
(there are tools like Weave Flux or GitKube to facilitate it)
|
||||
|
||||
---
|
||||
|
||||
## YAML in practice
|
||||
|
||||
- Get started with `kubectl run ...`
|
||||
|
||||
(until you have something that sort of works)
|
||||
|
||||
- Then, run these commands again, but with `-o yaml --dry-run=client`
|
||||
|
||||
(to generate and save YAML manifests)
|
||||
|
||||
- Try to apply these manifests in a clean environment
|
||||
|
||||
(e.g. a new Namespace)
|
||||
|
||||
- Check that everything works; tweak and iterate if needed
|
||||
|
||||
- Commit the YAML to a repo 💯🏆️
|
||||
|
||||
---
|
||||
|
||||
## "Day 2" YAML
|
||||
|
||||
- Don't hesitate to remove unused fields
|
||||
|
||||
(e.g. `creationTimestamp: null`, most `{}` values...)
|
||||
|
||||
- Check your YAML with:
|
||||
|
||||
[kube-score](https://github.com/zegl/kube-score) (installable with krew)
|
||||
|
||||
[kube-linter](https://github.com/stackrox/kube-linter)
|
||||
|
||||
- Check live resources with tools like [popeye](https://popeyecli.io/)
|
||||
|
||||
- Remember that like all linters, they need to be configured for your needs!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Techniques to write YAML manifests
|
||||
:FR:- Comment écrire des *manifests* YAML
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Multi-Line Command arguments
|
||||
|
||||
|
||||
.lab[
|
||||
```bash
|
||||
/bin/sh -c takes a single string parameter
|
||||
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "running below scripts"
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)";
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
```
|
||||
]
|
||||
32
slides/k8s/yaml-in-5-min.md
Normal file
@@ -0,0 +1,32 @@
|
||||
|
||||
# YAML in 5 minutes or less
|
||||
|
||||
- YAML == "YAML Ain't Markup Language" (originally "Yet Another Markup Language")
|
||||
|
||||
- Any JSON file can be transformed into YAML
|
||||
|
||||
- YAML is a superset of JSON
|
||||
- i.e., a valid YAML file can contain JSON
|
||||
|
||||
---
|
||||
|
||||
## YAML Syntax and Types
|
||||
- YAML Syntax is based on indentation
|
||||
|
||||
- YAML Data Types
|
||||
- Name/Value Maps
|
||||
|
||||
- Arrays
|
||||
|
||||
- String
|
||||
|
||||
- Number
|
||||
|
||||
- Boolean
|
||||
|
||||
- YAML support for Multi-line strings
|
||||
|
||||
See Samples
|
||||
- k8s/sampleYaml.yaml
|
||||
- k8s/sampleYamlAsJson.json
|
||||
|
||||
99
slides/kube-jerome.yml
Normal file
@@ -0,0 +1,99 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Intermediate
|
||||
Training
|
||||
|
||||
chat: "`#kubernetes-training-january-10-14`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-01-nr.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- exercises/ingress-brief.md
|
||||
- exercises/appconfig-brief.md
|
||||
- # DAY 1
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- # DAY 2
|
||||
- k8s/ourapponkube.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/localcluster-details.md
|
||||
- # DAY 3
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- exercises/healthchecks-details.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
- exercises/ingress-details.md
|
||||
- # DAY 4
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- # DAY 5
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- exercises/appconfig-details.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/consul.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/volume-claim-templates.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/openebs.md
|
||||
#- k8s/stateful-failover.md
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
120
slides/kube.aug.yaml
Normal file
@@ -0,0 +1,120 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Intermediate
|
||||
Training
|
||||
|
||||
chat: "`Zoom Chat`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-08-nr.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics-gerry.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
# - shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
- shared/chat-room-zoom-webinar.md
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- exercises/ingress-brief.md
|
||||
- exercises/appconfig-brief.md
|
||||
-
|
||||
# DAY 1
|
||||
- containers/Macro_View.md
|
||||
#- shared/webssh.md
|
||||
#- k8s/versions-k8s.md
|
||||
#- shared/composescale.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
#- shared/hastyconclusions.md
|
||||
# - k8s/shippingimages.md
|
||||
|
||||
- k8s/kubectl-first.md
|
||||
- k8s/authoring-yaml.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/alias-and-references.md
|
||||
|
||||
- # DAY 2
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubectl-more.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/rollout.md
|
||||
- k8s/yamldeploy.md
|
||||
|
||||
- # DAY 3 (Started with 2 hour's lab and discussion)
|
||||
- k8s/daemonset.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/kubenet.md
|
||||
- exercises/healthchecks-details.md
|
||||
|
||||
- # DAY 4
|
||||
- k8s/netpol.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/ingress.md
|
||||
- containers/software-deployment.md
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/volume-claim-templates.md
|
||||
- exercises/ingress-details.md
|
||||
- exercises/appconfig-details.md
|
||||
|
||||
- # DAY 5
|
||||
# - k8s/kubectlproxy.md
|
||||
- k8s/consul.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/localcluster-details.md
|
||||
- shared/thankyou.md
|
||||
|
||||
- # DockerCoins
|
||||
- |
|
||||
# (Docker Coins Example)
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/shippingimages.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
99
slides/kube.orig.yml
Normal file
@@ -0,0 +1,99 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Intermediate
|
||||
Training
|
||||
|
||||
chat: "`Zoom Chat`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-08-nr.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics-gerry.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- exercises/ingress-brief.md
|
||||
- exercises/appconfig-brief.md
|
||||
- # DAY 1
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- # DAY 2
|
||||
- k8s/ourapponkube.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/localcluster-details.md
|
||||
- # DAY 3
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- exercises/healthchecks-details.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
- exercises/ingress-details.md
|
||||
- # DAY 4
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- # DAY 5
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- exercises/appconfig-details.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/consul.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/volume-claim-templates.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/openebs.md
|
||||
#- k8s/stateful-failover.md
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
124
slides/kube.yml
Normal file
@@ -0,0 +1,124 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Intermediate
|
||||
Training
|
||||
|
||||
chat: "`Zoom Chat`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-09-nr1.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics-gerry.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
# - shared/chat-room-slack.md
|
||||
- shared/chat-room-zoom-meeting.md
|
||||
# - shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- exercises/ingress-brief.md
|
||||
- exercises/appconfig-brief.md
|
||||
-
|
||||
# DAY 1
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- containers/Macro_View.md
|
||||
#- shared/webssh.md
|
||||
#- k8s/versions-k8s.md
|
||||
#- shared/composescale.md
|
||||
- k8s/concepts-k8s-intro.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/yaml-in-5-min.md
|
||||
- k8s/concepts-k8s-arch.md
|
||||
- k8s/deploymentslideshow.md
|
||||
#- shared/hastyconclusions.md
|
||||
# - k8s/shippingimages.md
|
||||
|
||||
- k8s/kubectl-first.md
|
||||
- k8s/kubectl-run-pod.md
|
||||
# - k8s/authoring-yaml.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/alias-and-references.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
|
||||
- # DAY 2
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/kubectl-run-deployment.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectl-more.md
|
||||
- k8s/kubectlexpose.md
|
||||
|
||||
- # DAY 3
|
||||
- k8s/rollout.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/volumes.md
|
||||
|
||||
- # DAY 4
|
||||
- k8s/volume-claim-templates.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/netpol.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/ingress.md
|
||||
- exercises/healthchecks-details.md
|
||||
- exercises/ingress-details.md
|
||||
- exercises/appconfig-details.md
|
||||
|
||||
- # DAY 5
|
||||
# - k8s/kubectlproxy.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/consul.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/localcluster-details.md
|
||||
- shared/thankyou.md
|
||||
|
||||
- # DockerCoins
|
||||
- |
|
||||
# (Docker Coins Example)
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/shippingimages.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- containers/software-deployment.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
62
slides/logistics-gerry.md
Normal file
@@ -0,0 +1,62 @@
|
||||
## While We are Waiting To Get Started ...
|
||||
|
||||
This Side is at [https://2022-09-nr1.container.training/#2](https://2022-09-nr1.container.training/#2)
|
||||
|
||||
- If you have not already done so, please complete this survey: [https://docs.google.com/forms/d/1TEQylRtwZ7M_6fx0Zo9ErBYDeJlASAxSSgExHgafHKM
|
||||
](https://docs.google.com/forms/d/1TEQylRtwZ7M_6fx0Zo9ErBYDeJlASAxSSgExHgafHKM)
|
||||
|
||||
- Your lab computers are assigned on this Google Sheet: [https://docs.google.com/spreadsheets/d/1s8CboVoTOg9mWPZaRkLXJwRwEqNhceoPuTIKq6K3r7Q](https://docs.google.com/spreadsheets/d/1s8CboVoTOg9mWPZaRkLXJwRwEqNhceoPuTIKq6K3r7Q)
|
||||
|
||||
- Enter your name in column 1 in one of the unclaimed rows
|
||||
|
||||
- Your Lab Computer IP address is found in the ** node1 ** column
|
||||
|
||||
- Log into your lab computer: ** ssh -l k8s node1 **
|
||||
|
||||
- Password is: ** relics **
|
||||
|
||||
- Verify all is good with the command: ** kubectl version --short **
|
||||
|
||||
- Class Starts at 8AM EST / 12PM EST each day
|
||||
|
||||
---
|
||||
|
||||
## Introductions
|
||||
|
||||
- Hello! I'm [Gerry Seidman](https://www.linkedin.com/in/gerryseidman), Ardan Labs (gerry.seidman@ardanlabs.com)
|
||||
|
||||
- The training will run Monday to Friday, from 8:00AM to 12:00PM PST (11AM-3PM EST)
|
||||
|
||||
- There will be breaks!
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
<!-- -->
|
||||
|
||||
[@alexbuisine]: https://twitter.com/alexbuisine
|
||||
[EphemeraSearch]: https://ephemerasearch.com/
|
||||
[@jpetazzo]: https://twitter.com/jpetazzo
|
||||
[@s0ulshake]: https://twitter.com/s0ulshake
|
||||
[Quantgene]: https://www.quantgene.com/
|
||||
|
||||
---
|
||||
|
||||
## Exercises
|
||||
|
||||
- At the end of each day, there is a series of exercises
|
||||
|
||||
- To make the most out of the training, please try the exercises!
|
||||
|
||||
(it will help to practice and memorize the content of the day)
|
||||
|
||||
- We'll try to finish a bit earlier each day so that you can have lab time
|
||||
|
||||
(and if you can put some extra time that's even better ♥)
|
||||
|
||||
- Each day will start with a quick review of the exercises of the previous day
|
||||
|
||||
- If you have any questions, blockers, curiosities ... Ping me in the zoom chat
|
||||
@@ -1,62 +1,16 @@
|
||||
## Introductions
|
||||
|
||||
⚠️ This slide should be customized by the tutorial instructor(s).
|
||||
- Hello! I'm FIXME
|
||||
|
||||
<!--
|
||||
- The training will run Monday to Friday:
|
||||
|
||||
- Hello! We are:
|
||||
- 11am-3pm (New-York)
|
||||
|
||||
- 👷🏻♀️ AJ ([@s0ulshake], [EphemeraSearch], [Quantgene])
|
||||
- 4pm-8pm (London)
|
||||
|
||||
- 🚁 Alexandre ([@alexbuisine], Enix SAS)
|
||||
- 5pm-9pm (Barcelona, Paris, Berlin)
|
||||
|
||||
- 🐳 Jérôme ([@jpetazzo], Ardan Labs)
|
||||
|
||||
- 🐳 Jérôme ([@jpetazzo], Enix SAS)
|
||||
|
||||
- 🐳 Jérôme ([@jpetazzo], Tiny Shell Script LLC)
|
||||
|
||||
-->
|
||||
|
||||
<!--
|
||||
|
||||
- The training will run for 4 hours, with a 10 minutes break every hour
|
||||
|
||||
(the middle break will be a bit longer)
|
||||
|
||||
-->
|
||||
|
||||
<!--
|
||||
|
||||
- The workshop will run from XXX to YYY
|
||||
|
||||
- There will be a lunch break at ZZZ
|
||||
|
||||
(And coffee breaks!)
|
||||
|
||||
-->
|
||||
|
||||
<!--
|
||||
|
||||
- Feel free to interrupt for questions at any time
|
||||
|
||||
- *Especially when you see full screen container pictures!*
|
||||
|
||||
- Live feedback, questions, help: @@CHAT@@
|
||||
|
||||
-->
|
||||
|
||||
<!--
|
||||
|
||||
- You ~~should~~ must ask questions! Lots of questions!
|
||||
|
||||
(especially when you see full screen container pictures)
|
||||
|
||||
- Use @@CHAT@@ to ask questions, get help, etc.
|
||||
|
||||
-->
|
||||
|
||||
<!-- -->
|
||||
- There will be a short break (\~10 min) every hour
|
||||
|
||||
[@alexbuisine]: https://twitter.com/alexbuisine
|
||||
[EphemeraSearch]: https://ephemerasearch.com/
|
||||
@@ -74,8 +28,10 @@
|
||||
|
||||
(it will help to practice and memorize the content of the day)
|
||||
|
||||
- We recommend to take at least one hour to work on the exercises
|
||||
- We'll try to finish a bit earlier each day so that you can have lab time
|
||||
|
||||
(if you understood the content of the day, it will be much faster)
|
||||
(and if you can put some extra time that's even better ♥)
|
||||
|
||||
- Each day will start with a quick review of the exercises of the previous day
|
||||
|
||||
- Ping us on @@CHAT@@!
|
||||
|
||||
20564
slides/out.html
Normal file
@@ -1 +1 @@
|
||||
3.7
|
||||
3.8
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Pre-requirements
|
||||
## Pre-requirements
|
||||
|
||||
- Be comfortable with the UNIX command line
|
||||
|
||||
|
||||