mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-03-02 17:30:20 +00:00
Compare commits
6 Commits
2022-08-nr
...
2022-09-nr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5af30c64bb | ||
|
|
75c5964c30 | ||
|
|
b112c1fae6 | ||
|
|
b4d837bbf5 | ||
|
|
dda21fee01 | ||
|
|
da2806ea93 |
22
k8s/affinity-pod.yaml
Normal file
22
k8s/affinity-pod.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: aff-pod
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
affinity:
|
||||
containers:
|
||||
- name: aff-pod
|
||||
image: alpine
|
||||
command:
|
||||
- sleep
|
||||
args:
|
||||
- "1000"
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: cow
|
||||
operator: In
|
||||
values:
|
||||
- elsie
|
||||
22
k8s/init-container.yaml
Normal file
22
k8s/init-container.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: initty
|
||||
spec:
|
||||
volumes:
|
||||
- name: preFetched
|
||||
emptyDir: {}
|
||||
|
||||
containers:
|
||||
- name: main
|
||||
image: main
|
||||
volumeMounts:
|
||||
- name: preFetched
|
||||
mountPath: /usr/share/nginx/html/
|
||||
initContainers:
|
||||
- name: git-cloner
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /preFetched" ]
|
||||
volumeMounts:
|
||||
- name: preFetched
|
||||
mountPath: /preFetched/
|
||||
18
k8s/k8s-nr-kubeconfig.yaml
Normal file
18
k8s/k8s-nr-kubeconfig.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURKekNDQWcrZ0F3SUJBZ0lDQm5Vd0RRWUpLb1pJaHZjTkFRRUxCUUF3TXpFVk1CTUdBMVVFQ2hNTVJHbG4KYVhSaGJFOWpaV0Z1TVJvd0dBWURWUVFERXhGck9ITmhZWE1nUTJ4MWMzUmxjaUJEUVRBZUZ3MHlNakE1TVRneQpNekV6TWpGYUZ3MDBNakE1TVRneU16RXpNakZhTURNeEZUQVRCZ05WQkFvVERFUnBaMmwwWVd4UFkyVmhiakVhCk1CZ0dBMVVFQXhNUmF6aHpZV0Z6SUVOc2RYTjBaWElnUTBFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUIKRHdBd2dnRUtBb0lCQVFEYnVlN1MzRS9hdFpvQVJjSUllRFJNMG5vMThvaDNEL3cyV3VWQmNaQWppZXhmNGw4VQpldEZlWDBWQmZFZGJqUndIWTYva2VHdHVzS0dXUzNZdUN5RHd3WFNhMEV5NS9LM0ZLUHhEUkdyUWJSNXJkUWg5CmI4NW1IbXVIcUYvQXJHMWJVV2JYQmFRVVhBdXNtMVpjMnNtOXdWQm0vRlRJSTJDdEpReTViVXVIQnY3N01BNHEKUzV3b1liMXkwUHo0OXNuVldiY3BXZ1FxR080SE9JelFJc2crakxYR0lhWi96L0lneHR2M0ZYaVJVUlVIZWhERwplTTVuRDErY1JuUkorcDlLQU9VMUdOZzQwVENoN3hjaGo3UHNJMDV1Q0xVQWFhYVJ4M0pVRFBpRXgxWjVjOHQwCll6aTBXTVVTUVpkTjlUc3UrNGZZaXAyTFpkZGxXOW1ma0NYREFnTUJBQUdqUlRCRE1BNEdBMVVkRHdFQi93UUUKQXdJQmhqQVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUFNQjBHQTFVZERnUVdCQlNpcEo3SHZQTkRZMWcrcDNEdwp0TUEvNThmUmFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFuYkNYSHUvM3YrbXRlU3N4TXFxUndJd1c0T015CkdRdzE0aERtYkFRcmovYVo0WkFvZUJIdFJSMGYxTFFXQnVIQTBtTFJvSTFSenpBQWw3V2lNMDd6VU1ETlV2enUKR0FCVmtwOEV6b2RneTlNclFkN2VtZkNJRFA3SkhZV1FzL1VxcGVVZW4zcHljQ3dXZFFXY3ZDR0FtTEZZSzI3TApKcnFKV1JXNGErWTVDUkhqVytzTGJpeTNNMTdrOHVWM1pzMktNS0FUaVNXWUZTUzUrSkg5Tk5WdXNKd1lUZVZPCmJOZG5PbS9ub1NLejYrbHUvUm1NK0NsUFdXakdXcUlHdHZyNFl6b0puZk52UDNXL01FQXlzY3Zlck9jcXUxWTAKa1dmRkg2azVlY3NsK2k1RTFkaE02U0JRaFZzV1crMjFlN1plbVJwc1htNkNyYUZqek4vSFlaMEMzdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
server: https://8f36cb5d-e565-452a-a09c-81760683c1f9.k8s.ondigitalocean.com
|
||||
name: do-sfo3-k8s-nr
|
||||
contexts:
|
||||
- context:
|
||||
cluster: do-sfo3-k8s-nr
|
||||
user: do-sfo3-k8s-nr-admin
|
||||
name: do-sfo3-k8s-nr
|
||||
current-context: do-sfo3-k8s-nr
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: do-sfo3-k8s-nr-admin
|
||||
user:
|
||||
token: dop_v1_dc6f141491e1e3447a52ec192c3424c0481622f5430cf219fb38458280e1ff88
|
||||
23
k8s/multiLine.yaml
Normal file
23
k8s/multiLine.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
run: busybox
|
||||
name: busybox
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "running below scripts"
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)";
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
name: busybox
|
||||
image: busybox
|
||||
22
k8s/multiLine2.yaml
Normal file
22
k8s/multiLine2.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
run: busybox
|
||||
name: busybox
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
echo "running below scripts"
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)";
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
name: busybox
|
||||
image: busybox
|
||||
@@ -3,8 +3,9 @@ kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
terminationGracePeriodSeconds: 0
|
||||
restartPolicy: OnFailure
|
||||
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
@@ -17,5 +18,9 @@ spec:
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
restartPolicy: OnFailure
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
|
||||
|
||||
@@ -3,14 +3,8 @@ kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-init
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
terminationGracePeriodSeconds: 0
|
||||
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
@@ -18,3 +12,15 @@ spec:
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
|
||||
28
k8s/nginx-5-with-hostpath.yaml
Normal file
28
k8s/nginx-5-with-hostpath.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: hostpath-nginx
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 30
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
|
||||
volumes:
|
||||
- name: www
|
||||
hostPath:
|
||||
path: /home/k8s/myFiles
|
||||
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: myData
|
||||
operator: In
|
||||
values:
|
||||
- present
|
||||
|
||||
|
||||
27
k8s/nginx-git.yaml
Normal file
27
k8s/nginx-git.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
- name: git
|
||||
image: alpine
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
apk add git &&
|
||||
git clone https://github.com/octocat/Spoon-Knife /www
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
28
k8s/nginx-init.yaml
Normal file
28
k8s/nginx-init.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-git
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
initContainers:
|
||||
- name: git
|
||||
image: alpine
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
apk add git &&
|
||||
git clone https://github.com/octocat/Spoon-Knife /www
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
volumes:
|
||||
- name: www
|
||||
emptyDir: {}
|
||||
|
||||
8
k8s/nginx.yaml
Normal file
8
k8s/nginx.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: my-web
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
19
k8s/ping.yaml
Normal file
19
k8s/ping.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: ping
|
||||
name: ping
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- command:
|
||||
- ping
|
||||
args:
|
||||
- 127.0.0.1
|
||||
image: alpine
|
||||
name: ping
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
status: {}
|
||||
18
k8s/sampleYaml.yaml
Normal file
18
k8s/sampleYaml.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: gerry
|
||||
citizenship: US
|
||||
height-in-cm: 197
|
||||
coder: true
|
||||
friends:
|
||||
- Moe
|
||||
- Larry
|
||||
- Curly
|
||||
employees:
|
||||
- name: Moe
|
||||
position: dev
|
||||
- name: Larry
|
||||
position: ops
|
||||
- name: Curly
|
||||
position: devOps
|
||||
poem: |
|
||||
Mary had a little lamb
|
||||
It was very cute
|
||||
26
k8s/sampleYamlAsJson.json
Normal file
26
k8s/sampleYamlAsJson.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"name": "gerry",
|
||||
"citizenship": "US",
|
||||
"height-in-cm": 197,
|
||||
"coder": true,
|
||||
"friends": [
|
||||
"Moe",
|
||||
"Larry",
|
||||
"Curly"
|
||||
],
|
||||
"employees": [
|
||||
{
|
||||
"name": "Moe",
|
||||
"position": "dev"
|
||||
},
|
||||
{
|
||||
"name": "Larry",
|
||||
"position": "ops"
|
||||
},
|
||||
{
|
||||
"name": "Curly",
|
||||
"position": "devOps"
|
||||
}
|
||||
],
|
||||
"poem": "Mary had a little lamb\nIt was very cute\n"
|
||||
}
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
# External References && kubectl Aliases
|
||||
|
||||
Class Slides: https://2022-08-nr.container.training/
|
||||
Class Slides: https://2022-09-nr1.container.training/
|
||||
|
||||
Kubectl Cheat Sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
|
||||
|
||||
@@ -28,15 +28,16 @@ Gerry Seidman’s Info
|
||||
```bash
|
||||
alias k='kubectl'
|
||||
alias kg='kubectl get'
|
||||
alias kl='kubctl logs'
|
||||
alias kl='kubectl logs'
|
||||
alias ka='kubectl apply -f'
|
||||
alias kd='kubectl delete'
|
||||
alias kdf='kubectl delete -f'
|
||||
alias kb='kubectl describe'
|
||||
alias kex='kubectl explain'
|
||||
alias kx='kubectl expose'
|
||||
alias kr='kubectl run'
|
||||
alias ke='kubectl edit'
|
||||
alias kx='kubectl exec -it $1 -- /bin/sh'
|
||||
```
|
||||
```
|
||||
Note the below is only because of a quirk in how the lab VMs were installed:
|
||||
```bash
|
||||
echo 'kubectl exec -it $1 -- /bin/sh' >kx
|
||||
|
||||
370
slides/k8s/concepts-k8s-arch.md
Normal file
370
slides/k8s/concepts-k8s-arch.md
Normal file
@@ -0,0 +1,370 @@
|
||||
# Kubernetes Architecture
|
||||
|
||||
- The Kubernetes Architecture is minimal
|
||||
|
||||
- Kubernetes runs in Kubernetes (for the most part)
|
||||
|
||||
- Orchestration is done by a collection of Software Operators
|
||||
|
||||
- You can even write your own operators
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture
|
||||
|
||||
- Ha ha ha ha ha
|
||||
|
||||
- OK, I was trying to scare you, it's much simpler than that ❤️
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Kubernetes Architecture
|
||||
|
||||
- Ha ha ha ha
|
||||
|
||||
- OK, I was trying to scare you, it's much simpler than that ❤️
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI
|
||||
|
||||
(Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/))
|
||||
|
||||
- The second one is a simplified representation of a Kubernetes cluster
|
||||
|
||||
(Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e))
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the nodes
|
||||
|
||||
- The nodes executing our containers run a collection of services:
|
||||
|
||||
- a container Engine (typically Docker)
|
||||
|
||||
- kubelet (the "node agent")
|
||||
|
||||
- kube-proxy (a necessary but not sufficient network component)
|
||||
|
||||
- Nodes were formerly called "minions"
|
||||
|
||||
(You might see that word in older articles or documentation)
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes architecture: the control plane
|
||||
|
||||
- The Kubernetes logic (its "brains") is a collection of services:
|
||||
|
||||
- the API server (our point of entry to everything!)
|
||||
|
||||
- core services like the scheduler and controller manager
|
||||
|
||||
- `etcd` (a highly available key/value store; the "database" of Kubernetes)
|
||||
|
||||
- Together, these services form the control plane of our cluster
|
||||
|
||||
- The control plane is also called the "master"
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane on special nodes
|
||||
|
||||
- It is common to reserve a dedicated node for the control plane
|
||||
|
||||
(Except for single-node development clusters, like when using minikube)
|
||||
|
||||
- This node is then called a "master"
|
||||
|
||||
(Yes, this is ambiguous: is the "master" a node, or the whole control plane?)
|
||||
|
||||
- Normal applications are restricted from running on this node
|
||||
|
||||
(By using a mechanism called ["taints"](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/))
|
||||
|
||||
- When high availability is required, each service of the control plane must be resilient
|
||||
|
||||
- The control plane is then replicated on multiple nodes
|
||||
|
||||
(This is sometimes called a "multi-master" setup)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane outside containers
|
||||
|
||||
- The services of the control plane can run in or out of containers
|
||||
|
||||
- For instance: since `etcd` is a critical service, some people
|
||||
deploy it directly on a dedicated cluster (without containers)
|
||||
|
||||
(This is illustrated on the first "super complicated" schema)
|
||||
|
||||
- In some hosted Kubernetes offerings (e.g. AKS, GKE, EKS), the control plane is invisible
|
||||
|
||||
(We only "see" a Kubernetes API endpoint)
|
||||
|
||||
- In that case, there is no "master node"
|
||||
|
||||
*For this reason, it is more accurate to say "control plane" rather than "master."*
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## How many nodes should a cluster have?
|
||||
|
||||
- There is no particular constraint
|
||||
|
||||
(no need to have an odd number of nodes for quorum)
|
||||
|
||||
- A cluster can have zero node
|
||||
|
||||
(but then it won't be able to start any pods)
|
||||
|
||||
- For testing and development, having a single node is fine
|
||||
|
||||
- For production, make sure that you have extra capacity
|
||||
|
||||
(so that your workload still fits if you lose a node or a group of nodes)
|
||||
|
||||
- Kubernetes is tested with [up to 5000 nodes](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
|
||||
|
||||
(however, running a cluster of that size requires a lot of tuning)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
No!
|
||||
|
||||
--
|
||||
|
||||
- By default, Kubernetes uses the Docker Engine to run containers
|
||||
|
||||
- We can leverage other pluggable runtimes through the *Container Runtime Interface*
|
||||
|
||||
- <del>We could also use `rkt` ("Rocket") from CoreOS</del> (deprecated)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Some runtimes available through CRI
|
||||
|
||||
- [containerd](https://github.com/containerd/containerd/blob/master/README.md)
|
||||
|
||||
- maintained by Docker, IBM, and community
|
||||
- used by Docker Engine, microk8s, k3s, GKE; also standalone
|
||||
- comes with its own CLI, `ctr`
|
||||
|
||||
- [CRI-O](https://github.com/cri-o/cri-o/blob/master/README.md):
|
||||
|
||||
- maintained by Red Hat, SUSE, and community
|
||||
- used by OpenShift and Kubic
|
||||
- designed specifically as a minimal runtime for Kubernetes
|
||||
|
||||
- [And more](https://kubernetes.io/docs/setup/production-environment/container-runtimes/)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
Yes!
|
||||
|
||||
--
|
||||
|
||||
- In this workshop, we run our app on a single node first
|
||||
|
||||
- We will need to build images and ship them around
|
||||
|
||||
- We can do these things without Docker
|
||||
<br/>
|
||||
(and get diagnosed with NIH¹ syndrome)
|
||||
|
||||
- Docker is still the most stable container engine today
|
||||
<br/>
|
||||
(but other options are maturing very quickly)
|
||||
|
||||
.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
- On our development environments, CI pipelines ... :
|
||||
|
||||
*Yes, almost certainly*
|
||||
|
||||
- On our production servers:
|
||||
|
||||
*Yes (today)*
|
||||
|
||||
*Probably not (in the future)*
|
||||
|
||||
.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]
|
||||
|
||||
---
|
||||
|
||||
## Interacting with Kubernetes
|
||||
|
||||
- We will interact with our Kubernetes cluster through the Kubernetes API
|
||||
|
||||
- The Kubernetes API is (mostly) RESTful
|
||||
|
||||
- It allows us to create, read, update, delete *resources*
|
||||
|
||||
- A few common resource types are:
|
||||
|
||||
- node (a machine — physical or virtual — in our cluster)
|
||||
|
||||
- pod (group of containers running together on a node)
|
||||
|
||||
- service (stable network endpoint to connect to one or multiple containers)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Scaling
|
||||
|
||||
- How would we scale the pod shown on the previous slide?
|
||||
|
||||
- **Do** create additional pods
|
||||
|
||||
- each pod can be on a different node
|
||||
|
||||
- each pod will have its own IP address
|
||||
|
||||
- **Do not** add more NGINX containers in the pod
|
||||
|
||||
- all the NGINX containers would be on the same node
|
||||
|
||||
- they would all have the same IP address
|
||||
<br/>(resulting in `Address alreading in use` errors)
|
||||
|
||||
---
|
||||
|
||||
## Together or separate
|
||||
|
||||
- Should we put e.g. a web application server and a cache together?
|
||||
<br/>
|
||||
("cache" being something like e.g. Memcached or Redis)
|
||||
|
||||
- Putting them **in the same pod** means:
|
||||
|
||||
- they have to be scaled together
|
||||
|
||||
- they can communicate very efficiently over `localhost`
|
||||
|
||||
- Putting them **in different pods** means:
|
||||
|
||||
- they can be scaled separately
|
||||
|
||||
- they must communicate over remote IP addresses
|
||||
<br/>(incurring more latency, lower performance)
|
||||
|
||||
- Both scenarios can make sense, depending on our goals
|
||||
|
||||
---
|
||||
|
||||
## Credits
|
||||
|
||||
- The first diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
|
||||
|
||||
- it's one of the best Kubernetes architecture diagrams available!
|
||||
|
||||
- The second diagram is courtesy of Weave Works
|
||||
|
||||
- a *pod* can have multiple containers working together
|
||||
|
||||
- IP addresses are associated with *pods*, not with individual containers
|
||||
|
||||
Both diagrams used with permission.
|
||||
|
||||
???
|
||||
|
||||
:EN:- Kubernetes concepts
|
||||
:FR:- Kubernetes en théorie
|
||||
101
slides/k8s/concepts-k8s-intro.md
Normal file
101
slides/k8s/concepts-k8s-intro.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# Kubernetes concepts
|
||||
|
||||
- Kubernetes is a container management system
|
||||
|
||||
- It runs and manages containerized applications on a cluster
|
||||
|
||||
--
|
||||
|
||||
- What does that really mean?
|
||||
|
||||
---
|
||||
|
||||
## What can we do with Kubernetes?
|
||||
|
||||
- Let's imagine that we have a 3-tier e-commerce app:
|
||||
|
||||
- web frontend
|
||||
|
||||
- API backend
|
||||
|
||||
- database (that we will keep out of Kubernetes for now)
|
||||
|
||||
- We have built images for our frontend and backend components
|
||||
|
||||
(e.g. with Dockerfiles and `docker build`)
|
||||
|
||||
- We are running them successfully with a local environment
|
||||
|
||||
(e.g. with Docker Compose)
|
||||
|
||||
- Let's see how we would deploy our app on Kubernetes!
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Basic things we can ask Kubernetes to do
|
||||
|
||||
--
|
||||
|
||||
- Start 5 containers using image `atseashop/api:v1.3`
|
||||
|
||||
--
|
||||
|
||||
- Place an internal load balancer in front of these containers
|
||||
|
||||
--
|
||||
|
||||
- Start 10 containers using image `atseashop/webfront:v1.3`
|
||||
|
||||
--
|
||||
|
||||
- Place a public load balancer in front of these containers
|
||||
|
||||
--
|
||||
|
||||
- It's Black Friday (or Christmas), traffic spikes, grow our cluster and add containers
|
||||
|
||||
--
|
||||
|
||||
- New release! Replace my containers with the new image `atseashop/webfront:v1.4`
|
||||
|
||||
--
|
||||
|
||||
- Keep processing requests during the upgrade; update my containers one at a time
|
||||
|
||||
---
|
||||
|
||||
## Other things that Kubernetes can do for us
|
||||
|
||||
- Autoscaling
|
||||
|
||||
(straightforward on CPU; more complex on other metrics)
|
||||
|
||||
- Resource management and scheduling
|
||||
|
||||
(reserve CPU/RAM for containers; placement constraints)
|
||||
|
||||
- Advanced rollout patterns
|
||||
|
||||
(blue/green deployment, canary deployment)
|
||||
|
||||
---
|
||||
|
||||
## More things that Kubernetes can do for us
|
||||
|
||||
- Batch jobs
|
||||
|
||||
(one-off; parallel; also cron-style periodic execution)
|
||||
|
||||
- Fine-grained access control
|
||||
|
||||
(defining *what* can be done by *whom* on *which* resources)
|
||||
|
||||
- Stateful services
|
||||
|
||||
(databases, message queues, etc.)
|
||||
|
||||
- Automating complex tasks with *operators*
|
||||
|
||||
(e.g. database replication, failover, etc.)
|
||||
|
||||
@@ -316,6 +316,7 @@ class: extra-details
|
||||
## How to find charts, the new way
|
||||
|
||||
- Go to the [Artifact Hub](https://artifacthub.io/packages/search?kind=0) (https://artifacthub.io)
|
||||
https://artifacthub.io/packages/helm/securecodebox/juice-shop
|
||||
|
||||
- Or use `helm search hub ...` from the CLI
|
||||
|
||||
@@ -343,7 +344,8 @@ class: extra-details
|
||||
|
||||
]
|
||||
|
||||
Then go to → https://artifacthub.io/packages/helm/seccurecodebox/juice-shop
|
||||
Then go to → https://artifacthub.io/packages/helm/securecodebox/juice-shop
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -86,6 +86,24 @@ class: extra-details
|
||||
|
||||
---
|
||||
|
||||
## kubectl is an API Server Client
|
||||
|
||||
- kubectl verbose (-v)
|
||||
|
||||
- --v=6 Display requested resources.
|
||||
|
||||
- --v=7 Display HTTP request headers.
|
||||
|
||||
- --v=8 Display HTTP request contents.
|
||||
|
||||
- --v=9 Display HTTP request contents without truncation of contents.
|
||||
|
||||
```bash
|
||||
kubectl get nodes --v=8
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Obtaining machine-readable output
|
||||
|
||||
- `kubectl get` can output JSON, YAML, or be directly formatted
|
||||
|
||||
399
slides/k8s/kubectl-run-deployment.md
Normal file
399
slides/k8s/kubectl-run-deployment.md
Normal file
@@ -0,0 +1,399 @@
|
||||
|
||||
# Scaling our application
|
||||
|
||||
- `kubectl` gives us a simple command to scale a workload:
|
||||
|
||||
`kubectl scale TYPE NAME --replicas=HOWMANY`
|
||||
|
||||
- Let's try it on our Pod, so that we have more Pods!
|
||||
|
||||
.lab[
|
||||
|
||||
- Try to scale the Pod:
|
||||
```bash
|
||||
kubectl scale pod pingpong --replicas=3
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
🤔 We get the following error, what does that mean?
|
||||
|
||||
```
|
||||
Error from server (NotFound): the server could not find the requested resource
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scaling a Pod
|
||||
|
||||
- We cannot "scale a Pod"
|
||||
|
||||
(that's not completely true; we could give it more CPU/RAM)
|
||||
|
||||
- If we want more Pods, we need to create more Pods
|
||||
|
||||
(i.e. execute `kubectl run` multiple times)
|
||||
|
||||
- There must be a better way!
|
||||
|
||||
(spoiler alert: yes, there is a better way!)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `NotFound`
|
||||
|
||||
- What's the meaning of that error?
|
||||
```
|
||||
Error from server (NotFound): the server could not find the requested resource
|
||||
```
|
||||
|
||||
- When we execute `kubectl scale THAT-RESOURCE --replicas=THAT-MANY`,
|
||||
<br/>
|
||||
it is like telling Kubernetes:
|
||||
|
||||
*go to THAT-RESOURCE and set the scaling button to position THAT-MANY*
|
||||
|
||||
- Pods do not have a "scaling button"
|
||||
|
||||
- Try to execute the `kubectl scale pod` command with `-v6`
|
||||
|
||||
- We see a `PATCH` request to `/scale`: that's the "scaling button"
|
||||
|
||||
(technically it's called a *subresource* of the Pod)
|
||||
|
||||
---
|
||||
|
||||
## Creating more pods
|
||||
|
||||
- We are going to create a ReplicaSet
|
||||
|
||||
(= set of replicas = set of identical pods)
|
||||
|
||||
- In fact, we will create a Deployment, which itself will create a ReplicaSet
|
||||
|
||||
- Why so many layers? We'll explain that shortly, don't worry!
|
||||
|
||||
---
|
||||
|
||||
## Creating a Deployment running `ping`
|
||||
|
||||
- Let's create a Deployment instead of a single Pod
|
||||
|
||||
.lab[
|
||||
|
||||
- Create the Deployment; pay attention to the `--`:
|
||||
```bash
|
||||
kubectl create deployment pingpong --image=alpine -- ping 127.0.0.1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- The `--` is used to separate:
|
||||
|
||||
- "options/flags of `kubectl create`
|
||||
|
||||
- command to run in the container
|
||||
|
||||
---
|
||||
|
||||
## What has been created?
|
||||
|
||||
.lab[
|
||||
|
||||
<!-- ```hide kubectl wait pod --selector=app=pingpong --for condition=ready ``` -->
|
||||
|
||||
- Check the resources that were created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: `kubectl get all` is a lie. It doesn't show everything.
|
||||
|
||||
(But it shows a lot of "usual suspects", i.e. commonly used resources.)
|
||||
|
||||
---
|
||||
|
||||
## There's a lot going on here!
|
||||
|
||||
```
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
pod/pingpong 1/1 Running 0 4m17s
|
||||
pod/pingpong-6ccbc77f68-kmgfn 1/1 Running 0 11s
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h45
|
||||
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
deployment.apps/pingpong 1/1 1 1 11s
|
||||
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
replicaset.apps/pingpong-6ccbc77f68 1 1 1 11s
|
||||
```
|
||||
|
||||
Our new Pod is not named `pingpong`, but `pingpong-xxxxxxxxxxx-yyyyy`.
|
||||
|
||||
We have a Deployment named `pingpong`, and an extra ReplicaSet, too. What's going on?
|
||||
|
||||
---
|
||||
|
||||
## From Deployment to Pod
|
||||
|
||||
We have the following resources:
|
||||
|
||||
- `deployment.apps/pingpong`
|
||||
|
||||
This is the Deployment that we just created.
|
||||
|
||||
- `replicaset.apps/pingpong-xxxxxxxxxx`
|
||||
|
||||
This is a Replica Set created by this Deployment.
|
||||
|
||||
- `pod/pingpong-xxxxxxxxxx-yyyyy`
|
||||
|
||||
This is a *pod* created by the Replica Set.
|
||||
|
||||
Let's explain what these things are.
|
||||
|
||||
---
|
||||
|
||||
## Pod
|
||||
|
||||
- Can have one or multiple containers
|
||||
|
||||
- Runs on a single node
|
||||
|
||||
(Pod cannot "straddle" multiple nodes)
|
||||
|
||||
- Pods cannot be moved
|
||||
|
||||
(e.g. in case of node outage)
|
||||
|
||||
- Pods cannot be scaled horizontally
|
||||
|
||||
(except by manually creating more Pods)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Pod details
|
||||
|
||||
- A Pod is not a process; it's an environment for containers
|
||||
|
||||
- it cannot be "restarted"
|
||||
|
||||
- it cannot "crash"
|
||||
|
||||
- The containers in a Pod can crash
|
||||
|
||||
- They may or may not get restarted
|
||||
|
||||
(depending on Pod's restart policy)
|
||||
|
||||
- If all containers exit successfully, the Pod ends in "Succeeded" phase
|
||||
|
||||
- If some containers fail and don't get restarted, the Pod ends in "Failed" phase
|
||||
|
||||
---
|
||||
|
||||
## Replica Set
|
||||
|
||||
- Set of identical (replicated) Pods
|
||||
|
||||
- Defined by a pod template + number of desired replicas
|
||||
|
||||
- If there are not enough Pods, the Replica Set creates more
|
||||
|
||||
(e.g. in case of node outage; or simply when scaling up)
|
||||
|
||||
- If there are too many Pods, the Replica Set deletes some
|
||||
|
||||
(e.g. if a node was disconnected and comes back; or when scaling down)
|
||||
|
||||
- We can scale up/down a Replica Set
|
||||
|
||||
- we update the manifest of the Replica Set
|
||||
|
||||
- as a consequence, the Replica Set controller creates/deletes Pods
|
||||
|
||||
---
|
||||
|
||||
## Deployment
|
||||
|
||||
- Replica Sets control *identical* Pods
|
||||
|
||||
- Deployments are used to roll out different Pods
|
||||
|
||||
(different image, command, environment variables, ...)
|
||||
|
||||
- When we update a Deployment with a new Pod definition:
|
||||
|
||||
- a new Replica Set is created with the new Pod definition
|
||||
|
||||
- that new Replica Set is progressively scaled up
|
||||
|
||||
- meanwhile, the old Replica Set(s) is(are) scaled down
|
||||
|
||||
- This is a *rolling update*, minimizing application downtime
|
||||
|
||||
- When we scale up/down a Deployment, it scales up/down its Replica Set
|
||||
|
||||
---
|
||||
|
||||
## Can we scale now?
|
||||
|
||||
- Let's try `kubectl scale` again, but on the Deployment!
|
||||
|
||||
.lab[
|
||||
|
||||
- Scale our `pingpong` deployment:
|
||||
```bash
|
||||
kubectl scale deployment pingpong --replicas 3
|
||||
```
|
||||
|
||||
- Note that we could also write it like this:
|
||||
```bash
|
||||
kubectl scale deployment/pingpong --replicas 3
|
||||
```
|
||||
|
||||
- Check that we now have multiple pods:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Scaling a Replica Set
|
||||
|
||||
- What if we scale the Replica Set instead of the Deployment?
|
||||
|
||||
- The Deployment would notice it right away and scale back to the initial level
|
||||
|
||||
- The Replica Set makes sure that we have the right numbers of Pods
|
||||
|
||||
- The Deployment makes sure that the Replica Set has the right size
|
||||
|
||||
(conceptually, it delegates the management of the Pods to the Replica Set)
|
||||
|
||||
- This might seem weird (why this extra layer?) but will soon make sense
|
||||
|
||||
(when we will look at how rolling updates work!)
|
||||
|
||||
---
|
||||
|
||||
## Checking Deployment logs
|
||||
|
||||
- `kubectl logs` needs a Pod name
|
||||
|
||||
- But it can also work with a *type/name*
|
||||
|
||||
(e.g. `deployment/pingpong`)
|
||||
|
||||
.lab[
|
||||
|
||||
- View the result of our `ping` command:
|
||||
```bash
|
||||
kubectl logs deploy/pingpong --tail 2
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
- It shows us the logs of the first Pod of the Deployment
|
||||
|
||||
- We'll see later how to get the logs of *all* the Pods!
|
||||
|
||||
---
|
||||
|
||||
## Resilience
|
||||
|
||||
- The *deployment* `pingpong` watches its *replica set*
|
||||
|
||||
- The *replica set* ensures that the right number of *pods* are running
|
||||
|
||||
- What happens if pods disappear?
|
||||
|
||||
.lab[
|
||||
|
||||
- In a separate window, watch the list of pods:
|
||||
```bash
|
||||
watch kubectl get pods
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Every 2.0s```
|
||||
```tmux split-pane -v```
|
||||
-->
|
||||
|
||||
- Destroy the pod currently shown by `kubectl logs`:
|
||||
```
|
||||
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
|
||||
```
|
||||
|
||||
<!--
|
||||
```tmux select-pane -t 0```
|
||||
```copy pingpong-[^-]*-.....```
|
||||
```tmux last-pane```
|
||||
```keys kubectl delete pod ```
|
||||
```paste```
|
||||
```key ^J```
|
||||
```check```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What happened?
|
||||
|
||||
- `kubectl delete pod` terminates the pod gracefully
|
||||
|
||||
(sending it the TERM signal and waiting for it to shutdown)
|
||||
|
||||
- As soon as the pod is in "Terminating" state, the Replica Set replaces it
|
||||
|
||||
- But we can still see the output of the "Terminating" pod in `kubectl logs`
|
||||
|
||||
- Until 30 seconds later, when the grace period expires
|
||||
|
||||
- The pod is then killed, and `kubectl logs` exits
|
||||
|
||||
---
|
||||
|
||||
## Deleting a standalone Pod
|
||||
|
||||
- What happens if we delete a standalone Pod?
|
||||
|
||||
(like the first `pingpong` Pod that we created)
|
||||
|
||||
.lab[
|
||||
|
||||
- Delete the Pod:
|
||||
```bash
|
||||
kubectl delete pod pingpong
|
||||
```
|
||||
|
||||
<!--
|
||||
```key ^D```
|
||||
```key ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
- No replacement Pod gets created because there is no *controller* watching it
|
||||
|
||||
- That's why we will rarely use standalone Pods in practice
|
||||
|
||||
(except for e.g. punctual debugging or executing a short supervised task)
|
||||
|
||||
???
|
||||
|
||||
:EN:- Running pods and deployments
|
||||
:FR:- Créer un pod et un déploiement
|
||||
346
slides/k8s/kubectl-run-pod.md
Normal file
346
slides/k8s/kubectl-run-pod.md
Normal file
@@ -0,0 +1,346 @@
|
||||
# Running our first containers on Kubernetes
|
||||
|
||||
- First things first: we cannot run a container
|
||||
|
||||
--
|
||||
|
||||
- We are going to run a pod, and in that pod there will be a single container
|
||||
|
||||
--
|
||||
|
||||
- In that container in the pod, we are going to run a simple `ping` command
|
||||
|
||||
---
|
||||
|
||||
## Starting a simple pod with `kubectl run`
|
||||
|
||||
- `kubectl run` is convenient to start a single pod
|
||||
|
||||
- We need to specify at least a *name* and the image we want to use
|
||||
|
||||
- Optionally, we can specify the command to run in the pod
|
||||
|
||||
.lab[
|
||||
|
||||
- Let's ping the address of `localhost`, the loopback interface:
|
||||
```bash
|
||||
kubectl run pingpong --image alpine ping 127.0.0.1
|
||||
```
|
||||
|
||||
<!-- ```hide kubectl wait pod --selector=run=pingpong --for condition=ready``` -->
|
||||
|
||||
]
|
||||
|
||||
The output tells us that a Pod was created:
|
||||
```
|
||||
pod/pingpong created
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Viewing container output
|
||||
|
||||
- Let's use the `kubectl logs` command
|
||||
|
||||
- It takes a Pod name as argument
|
||||
|
||||
- Unless specified otherwise, it will only show logs of the first container in the pod
|
||||
|
||||
(Good thing there's only one in ours!)
|
||||
|
||||
.lab[
|
||||
|
||||
- View the result of our `ping` command:
|
||||
```bash
|
||||
kubectl logs pingpong
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Streaming logs in real time
|
||||
|
||||
- Just like `docker logs`, `kubectl logs` supports convenient options:
|
||||
|
||||
- `-f`/`--follow` to stream logs in real time (à la `tail -f`)
|
||||
|
||||
- `--tail` to indicate how many lines you want to see (from the end)
|
||||
|
||||
- `--since` to get logs only after a given timestamp
|
||||
|
||||
.lab[
|
||||
|
||||
- View the latest logs of our `ping` command:
|
||||
```bash
|
||||
kubectl logs pingpong --tail 1 --follow
|
||||
```
|
||||
|
||||
- Stop it with Ctrl-C
|
||||
|
||||
<!--
|
||||
```wait seq=3```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authoring YAML
|
||||
|
||||
- We have already generated YAML implicitly, with e.g.:
|
||||
|
||||
- `kubectl run`
|
||||
|
||||
- When and why do we need to write our own YAML?
|
||||
|
||||
- How do we write YAML from scratch?
|
||||
|
||||
---
|
||||
|
||||
## The limits of generated YAML
|
||||
|
||||
- Many advanced (and even not-so-advanced) features require to write YAML:
|
||||
|
||||
- pods with multiple containers
|
||||
|
||||
- resource limits
|
||||
|
||||
- healthchecks
|
||||
|
||||
- DaemonSets, StatefulSets
|
||||
|
||||
- and more!
|
||||
|
||||
- How do we access these features?
|
||||
|
||||
---
|
||||
|
||||
## Various ways to write YAML
|
||||
|
||||
- Completely from scratch with our favorite editor
|
||||
|
||||
(yeah, right)
|
||||
|
||||
- Dump an existing resource with `kubectl get -o yaml ...`
|
||||
|
||||
(it is recommended to clean up the result)
|
||||
|
||||
- Ask `kubectl` to generate the YAML
|
||||
|
||||
(with a `kubectl create --dry-run=client -o yaml`)
|
||||
|
||||
- Use The Docs, Luke
|
||||
|
||||
(the documentation almost always has YAML examples)
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML from scratch
|
||||
|
||||
- Start with a namespace:
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: hello
|
||||
```
|
||||
|
||||
- We can use `kubectl explain` to see resource definitions:
|
||||
```bash
|
||||
kubectl explain -r pod.spec
|
||||
```
|
||||
|
||||
- Not the easiest option!
|
||||
|
||||
---
|
||||
|
||||
## Dump the YAML for an existing resource
|
||||
|
||||
- `kubectl get -o yaml` works!
|
||||
|
||||
- A lot of fields in `metadata` are not necessary
|
||||
|
||||
(`managedFields`, `resourceVersion`, `uid`, `creationTimestamp` ...)
|
||||
|
||||
- Most objects will have a `status` field that is not necessary
|
||||
|
||||
- Default or empty values can also be removed for clarity
|
||||
|
||||
- This can be done manually or with the `kubectl-neat` plugin
|
||||
|
||||
`kubectl get -o yaml ... | kubectl neat`
|
||||
|
||||
---
|
||||
|
||||
## Generating YAML without creating resources
|
||||
|
||||
- We can use the `--dry-run=client` option
|
||||
|
||||
.lab[
|
||||
|
||||
- Generate the YAML for a Pod without creating it:
|
||||
```bash
|
||||
  kubectl run pingpong --image alpine --dry-run=client -o yaml ping 127.0.0.1
|
||||
|
||||
  kubectl run pingpong --image alpine --dry-run=client -o yaml ping 127.0.0.1 >ping.yaml
|
||||
```
|
||||
|
||||
- Optionally clean it up with `kubectl neat`, too
|
||||
|
||||
```bash
|
||||
kubectl apply -f ping.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Server-side dry run
|
||||
|
||||
- Server-side dry run will do all the work, but *not* persist to etcd
|
||||
|
||||
(all validation and mutation hooks will be executed)
|
||||
|
||||
.lab[
|
||||
|
||||
- Try the same YAML file as earlier, with server-side dry run:
|
||||
```bash
|
||||
kubectl run pingpong --image alpine --dry-run=server ping 127.0.0.1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Advantages of server-side dry run
|
||||
|
||||
- The YAML is verified much more extensively
|
||||
|
||||
- The only step that is skipped is "write to etcd"
|
||||
|
||||
- YAML that passes server-side dry run *should* apply successfully
|
||||
|
||||
(unless the cluster state changes by the time the YAML is actually applied)
|
||||
|
||||
- Validating or mutating hooks that have side effects can also be an issue
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## `kubectl diff`
|
||||
|
||||
- `kubectl diff` does a server-side dry run, *and* shows differences
|
||||
|
||||
.lab[
|
||||
|
||||
- Try `kubectl diff` on the YAML that we tweaked earlier:
|
||||
```bash
|
||||
kubectl diff -f web.yaml
|
||||
```
|
||||
|
||||
<!-- ```wait status:``` -->
|
||||
|
||||
]
|
||||
|
||||
Note: we don't need to specify `--validate=false` here.
|
||||
|
||||
---
|
||||
|
||||
## Advantage of YAML
|
||||
|
||||
- Using YAML (instead of `kubectl create <kind>`) allows to be *declarative*
|
||||
|
||||
- The YAML describes the desired state of our cluster and applications
|
||||
|
||||
- YAML can be stored, versioned, archived (e.g. in git repositories)
|
||||
|
||||
- To change resources, change the YAML files
|
||||
|
||||
(instead of using `kubectl edit`/`scale`/`label`/etc.)
|
||||
|
||||
- Changes can be reviewed before being applied
|
||||
|
||||
(with code reviews, pull requests ...)
|
||||
|
||||
- This workflow is sometimes called "GitOps"
|
||||
|
||||
(there are tools like Weave Flux or GitKube to facilitate it)
|
||||
|
||||
---
|
||||
|
||||
## YAML in practice
|
||||
|
||||
- Get started with `kubectl run ...`
|
||||
|
||||
(until you have something that sort of works)
|
||||
|
||||
- Then, run these commands again, but with `-o yaml --dry-run=client`
|
||||
|
||||
(to generate and save YAML manifests)
|
||||
|
||||
- Try to apply these manifests in a clean environment
|
||||
|
||||
(e.g. a new Namespace)
|
||||
|
||||
- Check that everything works; tweak and iterate if needed
|
||||
|
||||
- Commit the YAML to a repo 💯🏆️
|
||||
|
||||
---
|
||||
|
||||
## "Day 2" YAML
|
||||
|
||||
- Don't hesitate to remove unused fields
|
||||
|
||||
(e.g. `creationTimestamp: null`, most `{}` values...)
|
||||
|
||||
- Check your YAML with:
|
||||
|
||||
[kube-score](https://github.com/zegl/kube-score) (installable with krew)
|
||||
|
||||
[kube-linter](https://github.com/stackrox/kube-linter)
|
||||
|
||||
- Check live resources with tools like [popeye](https://popeyecli.io/)
|
||||
|
||||
- Remember that like all linters, they need to be configured for your needs!
|
||||
|
||||
???
|
||||
|
||||
:EN:- Techniques to write YAML manifests
|
||||
:FR:- Comment écrire des *manifests* YAML
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Multi-Line Command arguments
|
||||
|
||||
|
||||
.lab[
|
||||
```bash
|
||||
/bin/sh -c takes a single string parameter
|
||||
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "running below scripts"
|
||||
i=0;
|
||||
while true;
|
||||
do
|
||||
echo "$i: $(date)";
|
||||
i=$((i+1));
|
||||
sleep 1;
|
||||
done
|
||||
```
|
||||
]
|
||||
32
slides/k8s/yaml-in-5-min.md
Normal file
32
slides/k8s/yaml-in-5-min.md
Normal file
@@ -0,0 +1,32 @@
|
||||
|
||||
# YAML in 5 minutes or less
|
||||
|
||||
- YAML == YAML Ain't Markup Language
|
||||
|
||||
- Any JSON file can be transformed into YAML
|
||||
|
||||
- YAML is a superset of JSON
|
||||
  - i.e. any valid JSON document is also valid YAML
|
||||
|
||||
---
|
||||
|
||||
## YAML Syntax and Types
|
||||
- YAML Syntax is based on indentation
|
||||
|
||||
- YAML Data Types
|
||||
- Name/Value Maps
|
||||
|
||||
- Arrays
|
||||
|
||||
- String
|
||||
|
||||
- Number
|
||||
|
||||
- Boolean
|
||||
|
||||
- YAML support for Multi-line strings
|
||||
|
||||
See Samples
|
||||
- k8s/sampleYaml.yaml
|
||||
- k8s/sampleYamlAsJson.json
|
||||
|
||||
99
slides/kube-jerome.yml
Normal file
99
slides/kube-jerome.yml
Normal file
@@ -0,0 +1,99 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Intermediate
|
||||
Training
|
||||
|
||||
chat: "`#kubernetes-training-january-10-14`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-01-nr.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
- shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
#- shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- exercises/ingress-brief.md
|
||||
- exercises/appconfig-brief.md
|
||||
- # DAY 1
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
#- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/shippingimages.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- # DAY 2
|
||||
- k8s/ourapponkube.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/yamldeploy.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/localcluster-details.md
|
||||
- # DAY 3
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- exercises/healthchecks-details.md
|
||||
- k8s/ingress.md
|
||||
#- k8s/ingress-tls.md
|
||||
- exercises/ingress-details.md
|
||||
- # DAY 4
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/cluster-sizing.md
|
||||
- # DAY 5
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- exercises/appconfig-details.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/consul.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/volume-claim-templates.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/openebs.md
|
||||
#- k8s/stateful-failover.md
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
120
slides/kube.aug.yaml
Normal file
120
slides/kube.aug.yaml
Normal file
@@ -0,0 +1,120 @@
|
||||
title: |
|
||||
Kubernetes
|
||||
Intermediate
|
||||
Training
|
||||
|
||||
chat: "`Zoom Chat`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-08-nr.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
exclude:
|
||||
- self-paced
|
||||
|
||||
content:
|
||||
- shared/title.md
|
||||
- logistics-gerry.md
|
||||
- k8s/intro.md
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
# - shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
- shared/chat-room-zoom-webinar.md
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
- exercises/healthchecks-brief.md
|
||||
- exercises/ingress-brief.md
|
||||
- exercises/appconfig-brief.md
|
||||
-
|
||||
# DAY 1
|
||||
- containers/Macro_View.md
|
||||
#- shared/webssh.md
|
||||
#- k8s/versions-k8s.md
|
||||
#- shared/composescale.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
#- shared/hastyconclusions.md
|
||||
# - k8s/shippingimages.md
|
||||
|
||||
- k8s/kubectl-first.md
|
||||
- k8s/authoring-yaml.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/alias-and-references.md
|
||||
|
||||
- # DAY 2
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubectl-more.md
|
||||
- k8s/kubectlexpose.md
|
||||
- k8s/rollout.md
|
||||
- k8s/yamldeploy.md
|
||||
|
||||
- # DAY 3 (Started with 2 hour's lab and discussion)
|
||||
- k8s/daemonset.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/kubenet.md
|
||||
- exercises/healthchecks-details.md
|
||||
|
||||
- # DAY 4
|
||||
- k8s/netpol.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/ingress.md
|
||||
- containers/software-deployment.md
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/volume-claim-templates.md
|
||||
- exercises/ingress-details.md
|
||||
- exercises/appconfig-details.md
|
||||
|
||||
- # DAY 5
|
||||
# - k8s/kubectlproxy.md
|
||||
- k8s/consul.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/setup-overview.md
|
||||
- k8s/setup-devel.md
|
||||
- k8s/cluster-sizing.md
|
||||
- k8s/localkubeconfig.md
|
||||
- k8s/k9s.md
|
||||
- k8s/tilt.md
|
||||
- exercises/localcluster-details.md
|
||||
- shared/thankyou.md
|
||||
|
||||
- # DockerCoins
|
||||
- |
|
||||
# (Docker Coins Example)
|
||||
- shared/sampleapp.md
|
||||
- shared/composedown.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/shippingimages.md
|
||||
- exercises/k8sfundamentals-details.md
|
||||
- k8s/ourapponkube.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
- k8s/helm-intro.md
|
||||
- k8s/helm-chart-format.md
|
||||
- k8s/helm-create-basic-chart.md
|
||||
- k8s/helm-create-better-chart.md
|
||||
@@ -7,7 +7,7 @@ chat: "`Zoom Chat`"
|
||||
|
||||
gitrepo: github.com/jpetazzo/container.training
|
||||
|
||||
slides: https://2022-08-nr.container.training/
|
||||
slides: https://2022-09-nr1.container.training/
|
||||
|
||||
#slidenumberprefix: "#SomeHashTag — "
|
||||
|
||||
@@ -21,12 +21,8 @@ content:
|
||||
- shared/about-slides.md
|
||||
#- shared/chat-room-im.md
|
||||
# - shared/chat-room-slack.md
|
||||
#- shared/chat-room-zoom-meeting.md
|
||||
- shared/chat-room-zoom-webinar.md
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
|
||||
- shared/chat-room-zoom-meeting.md
|
||||
# - shared/chat-room-zoom-webinar.md
|
||||
- shared/toc.md
|
||||
- exercises/k8sfundamentals-brief.md
|
||||
- exercises/localcluster-brief.md
|
||||
@@ -35,57 +31,64 @@ content:
|
||||
- exercises/appconfig-brief.md
|
||||
-
|
||||
# DAY 1
|
||||
- shared/prereqs.md
|
||||
- shared/webssh.md
|
||||
- shared/connecting.md
|
||||
- containers/Macro_View.md
|
||||
#- shared/webssh.md
|
||||
#- k8s/versions-k8s.md
|
||||
#- shared/composescale.md
|
||||
- k8s/concepts-k8s.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/concepts-k8s-intro.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- k8s/yaml-in-5-min.md
|
||||
- k8s/concepts-k8s-arch.md
|
||||
- k8s/deploymentslideshow.md
|
||||
#- shared/hastyconclusions.md
|
||||
# - k8s/shippingimages.md
|
||||
|
||||
- k8s/kubectl-first.md
|
||||
- k8s/authoring-yaml.md
|
||||
- k8s/kubectl-run-pod.md
|
||||
# - k8s/authoring-yaml.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/alias-and-references.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
|
||||
- # DAY 2
|
||||
- k8s/labels-annotations.md
|
||||
- k8s/kubectl-logs.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/kubectl-run.md
|
||||
- k8s/kubectl-run-deployment.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectl-more.md
|
||||
- k8s/kubectlexpose.md
|
||||
|
||||
- # DAY 3
|
||||
- k8s/rollout.md
|
||||
- k8s/yamldeploy.md
|
||||
|
||||
- # DAY 3 (Started with 2 hour's lab and discussion)
|
||||
- k8s/daemonset.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/kubenet.md
|
||||
- exercises/healthchecks-details.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/volumes.md
|
||||
|
||||
- # DAY 4
|
||||
- k8s/volume-claim-templates.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/kubenet.md
|
||||
- k8s/netpol.md
|
||||
- k8s/accessinternal.md
|
||||
- k8s/ingress.md
|
||||
- containers/software-deployment.md
|
||||
- k8s/volumes.md
|
||||
- k8s/configuration.md
|
||||
- k8s/secrets.md
|
||||
- k8s/volume-claim-templates.md
|
||||
- exercises/healthchecks-details.md
|
||||
- exercises/ingress-details.md
|
||||
- exercises/appconfig-details.md
|
||||
|
||||
- # DAY 5
|
||||
# - k8s/kubectlproxy.md
|
||||
- k8s/consul.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/pv-pvc-sc.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/consul.md
|
||||
- k8s/resource-limits.md
|
||||
- k8s/metrics-server.md
|
||||
- k8s/setup-overview.md
|
||||
@@ -111,6 +114,7 @@ content:
|
||||
- # Extra
|
||||
- |
|
||||
# (Extra content)
|
||||
- containers/software-deployment.md
|
||||
- k8s/horizontal-pod-autoscaler.md
|
||||
- k8s/dashboard.md
|
||||
- k8s/batch-jobs.md
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
## While We are Waiting To Get Started ...
|
||||
|
||||
- If you have not already done so, please complete this survey: [https://tinyurl.com/mrx6fsrh
|
||||
](https://docs.google.com/forms/d/1w6wmnvbzyBPbt6f-k0B1ueA-pnkY_zmvROoeRj-vdFU)
|
||||
This Slide is at [https://2022-09-nr1.container.training/#2](https://2022-09-nr1.container.training/#2)
|
||||
|
||||
- Your lab computers are assigned on this Google Sheet: [https://tinyurl.com/53dm9b66](https://docs.google.com/spreadsheets/d/1aXF46q5jtQUOEQGniPffljmrPX0PmYRc0x7XKogFmQU/edit)
|
||||
- If you have not already done so, please complete this survey: [https://docs.google.com/forms/d/1TEQylRtwZ7M_6fx0Zo9ErBYDeJlASAxSSgExHgafHKM
|
||||
](https://docs.google.com/forms/d/1TEQylRtwZ7M_6fx0Zo9ErBYDeJlASAxSSgExHgafHKM)
|
||||
|
||||
- Your lab computers are assigned on this Google Sheet: [https://docs.google.com/spreadsheets/d/1s8CboVoTOg9mWPZaRkLXJwRwEqNhceoPuTIKq6K3r7Q](https://docs.google.com/spreadsheets/d/1s8CboVoTOg9mWPZaRkLXJwRwEqNhceoPuTIKq6K3r7Q)
|
||||
|
||||
- Enter your name in column 1 in one of the unclaimed rows
|
||||
|
||||
@@ -15,7 +17,7 @@
|
||||
|
||||
- Verify all is good with the command: **kubectl version --short**
|
||||
|
||||
- Class Starts at 9AM PST / 11AM EST each day
|
||||
- Class Starts at 8AM EST / 12PM EST each day
|
||||
|
||||
---
|
||||
|
||||
|
||||
20564
slides/out.html
Normal file
20564
slides/out.html
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user