Merge branch 'master' into pod-security-policy
k8s/persistent-consul.yaml (new file)
@@ -0,0 +1,95 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
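The Role above only grants `get` and `list` on Pods: that is what Consul's `-retry-join=provider=k8s` discovery needs in order to find its peers through the Kubernetes API. A quick sanity check, as a sketch (it assumes the manifest was applied in the `orange` namespace, as the slides later in this commit do):

```bash
# Should print "yes" once the Role and RoleBinding are in place
kubectl -n orange auth can-i list pods \
        --as=system:serviceaccount:orange:consul
```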
k8s/volumes-for-consul.yaml (new file)
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
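After these PVs are created, they should show up as `Available` until the Consul PVCs claim them. One way to observe the matching, as a sketch:

```bash
# STATUS goes from Available to Bound as the PVCs get matched
kubectl get pv
# The node pinning is enforced by the nodeAffinity section
kubectl describe pv consul-node2
```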
@@ -248,6 +248,14 @@ EOF"
     sudo tar -C /usr/local/bin -zx ship
   fi"
 
+  # Install the AWS IAM authenticator
+  pssh "
+  if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
+    ##VERSION##
+    sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
+    sudo chmod +x /usr/local/bin/aws-iam-authenticator
+  fi"
+
   sep "Done"
 }
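Note that the script downloads the binary without any checksum verification; a smoke test could be added after the `chmod` (a possible addition, not something the script does):

```bash
# aws-iam-authenticator ships a "version" subcommand; running it
# at least confirms the binary downloaded completely and executes
/usr/local/bin/aws-iam-authenticator version
```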
@@ -383,6 +391,15 @@ _cmd_retag() {
     aws_tag_instances $OLDTAG $NEWTAG
 }
 
+_cmd ssh "Open an SSH session to the first node of a tag"
+_cmd_ssh() {
+    TAG=$1
+    need_tag
+    IP=$(head -1 tags/$TAG/ips.txt)
+    info "Logging into $IP"
+    ssh docker@$IP
+}
+
 _cmd start "Start a group of VMs"
 _cmd_start() {
     while [ ! -z "$*" ]; do
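Usage would then look something like this (a sketch; the tag name is hypothetical, and the exact name of the wrapper script may differ):

```bash
# Open a shell on the first VM carrying the given tag
./workshopctl ssh 2019-05-workshop
```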
@@ -481,12 +498,12 @@ _cmd_helmprom() {
     if i_am_first_node; then
         kubectl -n kube-system get serviceaccount helm ||
         kubectl -n kube-system create serviceaccount helm
-        helm init --service-account helm
+        sudo -u docker -H helm init --service-account helm
         kubectl get clusterrolebinding helm-can-do-everything ||
         kubectl create clusterrolebinding helm-can-do-everything \
             --clusterrole=cluster-admin \
             --serviceaccount=kube-system:helm
-        helm upgrade --install prometheus stable/prometheus \
+        sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
             --namespace kube-system \
             --set server.service.type=NodePort \
             --set server.service.nodePort=30090 \
@@ -186,22 +186,48 @@ Different deployments will use different underlying technologies.
 
 ---
 
-## Section summary
+## Some popular service meshes
 
-We've learned how to:
+... And related projects:
 
-* Understand the ambassador pattern and what it is used for (service portability).
-
-For more information about the ambassador pattern, including demos on Swarm and ECS:
-
-* AWS re:invent 2015 [DVO317](https://www.youtube.com/watch?v=7CZFpHUPqXw)
-
-* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
-
-Some services meshes and related projects:
-
-* [Istio](https://istio.io/)
-
-* [Linkerd](https://linkerd.io/)
+* [Consul Connect](https://www.consul.io/docs/connect/index.html)
+  <br/>
+  Transparently secures service-to-service connections with mTLS.
+
+* [Gloo](https://gloo.solo.io/)
+  <br/>
+  API gateway that can interconnect applications on VMs, containers, and serverless.
+
+* [Istio](https://istio.io/)
+  <br/>
+  A popular service mesh.
+
+* [Linkerd](https://linkerd.io/)
+  <br/>
+  Another popular service mesh.
+
+---
+
+## Learning more about service meshes
+
+A few blog posts about service meshes:
+
+* [Containers, microservices, and service meshes](http://jpetazzo.github.io/2019/05/17/containers-microservices-service-meshes/)
+  <br/>
+  Provides historical context: how did we do before service meshes were invented?
+
+* [Do I Need a Service Mesh?](https://www.nginx.com/blog/do-i-need-a-service-mesh/)
+  <br/>
+  Explains the purpose of service meshes. Illustrates some NGINX features.
+
+* [Do you need a service mesh?](https://www.oreilly.com/ideas/do-you-need-a-service-mesh)
+  <br/>
+  Includes high-level overview and definitions.
+
+* [What is Service Mesh and Why Do We Need It?](https://containerjournal.com/2018/12/12/what-is-service-mesh-and-why-do-we-need-it/)
+  <br/>
+  Includes a step-by-step demo of Linkerd.
+
+And a video:
+
+* [What is a Service Mesh, and Do I Need One When Developing Microservices?](https://www.datawire.io/envoyproxy/service-mesh/)
@@ -528,7 +528,9 @@ Very short instructions:
 
 - `docker network create mynet --driver overlay`
 - `docker service create --network mynet myimage`
 
-See https://jpetazzo.github.io/container.training for all the deets about clustering!
+If you want to learn more about Swarm mode, you can check
+[this video](https://www.youtube.com/watch?v=EuzoEaE6Cqs)
+or [these slides](https://container.training/swarm-selfpaced.yml.html).
 
 ---
slides/containers/Exercise_Composefile.md (new file)
@@ -0,0 +1,5 @@
# Exercise — writing a Compose file

Let's write a Compose file for the wordsmith app!

The code is at: https://github.com/jpetazzo/wordsmith
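For reference, one possible shape for that Compose file (a sketch, not the official solution; it assumes the wordsmith repository keeps its three components in `db/`, `words/`, and `web/` subdirectories, and that the web frontend listens on port 80):

```yaml
version: "3"

services:
  db:              # PostgreSQL database
    build: db
  words:           # Java REST API serving random words
    build: words
  web:             # web frontend
    build: web
    ports:
      - "8080:80"  # publish the frontend on localhost:8080
```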
slides/containers/Exercise_Dockerfile_Advanced.md (new file)
@@ -0,0 +1,9 @@
# Exercise — writing better Dockerfiles

Let's update our Dockerfiles to leverage multi-stage builds!

The code is at: https://github.com/jpetazzo/wordsmith

Use a different tag for these images, so that we can compare their sizes.

What's the size difference between single-stage and multi-stage builds?
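To answer that last question, comparing the tags side by side works well (a sketch; the `wordsmith-web` image name and the `single`/`multi` tags are made up for illustration):

```bash
# The SIZE column makes the difference between the two builds obvious
docker images | grep wordsmith-web
docker image inspect wordsmith-web:single --format '{{.Size}}'
docker image inspect wordsmith-web:multi --format '{{.Size}}'
```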
slides/containers/Exercise_Dockerfile_Basic.md (new file)
@@ -0,0 +1,5 @@
# Exercise — writing Dockerfiles

Let's write Dockerfiles for an existing application!

The code is at: https://github.com/jpetazzo/wordsmith
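As a starting point, a single-stage Dockerfile for one of the components could look like this (a sketch under assumptions, not the official solution: it supposes the component is a Go program built from the current directory):

```dockerfile
FROM golang
COPY . /src
WORKDIR /src
RUN go build -o webapp .
CMD ["/src/webapp"]
```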
@@ -203,4 +203,90 @@ bash: figlet: command not found
 
 * The basic Ubuntu image was used, and `figlet` is not here.
 
 * We will see in the next chapters how to bake a custom image with `figlet`.
+
+---
+
+## Where's my container?
+
+* Can we reuse that container that we took time to customize?
+
+  *We can, but that's not the default workflow with Docker.*
+
+* What's the default workflow, then?
+
+  *Always start with a fresh container.*
+  <br/>
+  *If we need something installed in our container, build a custom image.*
+
+* That seems complicated!
+
+  *We'll see that it's actually pretty easy!*
+
+* And what's the point?
+
+  *This puts a strong emphasis on automation and repeatability. Let's see why ...*
+
+---
+
+## Pets vs. Cattle
+
+* In the "pets vs. cattle" metaphor, there are two kinds of servers.
+
+* Pets:
+
+  * have distinctive names and unique configurations
+
+  * when they have an outage, we do everything we can to fix them
+
+* Cattle:
+
+  * have generic names (e.g. with numbers) and generic configuration
+
+  * configuration is enforced by configuration management, golden images ...
+
+  * when they have an outage, we can replace them immediately with a new server
+
+* What's the connection with Docker and containers?
+
+---
+
+## Local development environments
+
+* When we use local VMs (with e.g. VirtualBox or VMware), our workflow looks like this:
+
+  * create VM from base template (Ubuntu, CentOS...)
+
+  * install packages, set up environment
+
+  * work on project
+
+  * when done, shut down VM
+
+  * next time we need to work on the project, restart the VM as we left it
+
+  * if we need to tweak the environment, we do it live
+
+* Over time, the VM configuration evolves and diverges.
+
+* We don't have a clean, reliable, deterministic way to provision that environment.
+
+---
+
+## Local development with Docker
+
+* With Docker, the workflow looks like this:
+
+  * create a container image with our dev environment
+
+  * run a container with that image
+
+  * work on the project
+
+  * when done, shut down the container
+
+  * next time we need to work on the project, start a new container
+
+  * if we need to tweak the environment, we create a new image
+
+* We have a clear definition of our environment, and can share it reliably with others.
+
+* Let's see in the next chapters how to bake a custom image with `figlet`!
@@ -70,8 +70,9 @@ class: pic
 
 * An image is a read-only filesystem.
 
-* A container is an encapsulated set of processes running in a
-  read-write copy of that filesystem.
+* A container is an encapsulated set of processes,
+
+  running in a read-write copy of that filesystem.
 
 * To optimize container boot time, *copy-on-write* is used
   instead of regular copy.
@@ -177,8 +178,11 @@ Let's explain each of them.
 
 ## Root namespace
 
-The root namespace is for official images. They are put there by Docker Inc.,
-but they are generally authored and maintained by third parties.
+The root namespace is for official images.
+
+They are gated by Docker Inc.
+
+They are generally authored and maintained by third parties.
 
 Those images include:
@@ -188,7 +192,7 @@ Those images include:
 
 * Ready-to-use components and services, like redis, postgresql...
 
-* Over 130 at this point!
+* Over 150 at this point!
 
 ---
@@ -6,8 +6,6 @@ In this chapter, we will:
 
 * Present (from a high-level perspective) some orchestrators.
 
 * Show one orchestrator (Kubernetes) in action.
 
 ---
 
 class: pic
@@ -30,27 +30,11 @@ chapters:
   - containers/Building_Images_With_Dockerfiles.md
   - containers/Cmd_And_Entrypoint.md
 - - containers/Copying_Files_During_Build.md
-  - |
-    # Exercise — writing Dockerfiles
-
-    Let's write Dockerfiles for an existing application!
-
-    The code is at: https://github.com/jpetazzo/wordsmith
-
+  - containers/Exercise_Dockerfile_Basic.md
   - containers/Multi_Stage_Builds.md
   - containers/Publishing_To_Docker_Hub.md
   - containers/Dockerfile_Tips.md
-  - |
-    # Exercise — writing better Dockerfiles
-
-    Let's update our Dockerfiles to leverage multi-stage builds!
-
-    The code is at: https://github.com/jpetazzo/wordsmith
-
-    Use a different tag for these images, so that we can compare their sizes.
-
-    What's the size difference between single-stage and multi-stage builds?
-
+  - containers/Exercise_Dockerfile_Advanced.md
 - - containers/Naming_And_Inspecting.md
   - containers/Labels.md
   - containers/Getting_Inside.md
@@ -64,13 +48,7 @@ chapters:
   - containers/Windows_Containers.md
   - containers/Working_With_Volumes.md
   - containers/Compose_For_Dev_Stacks.md
-  - |
-    # Exercise — writing a Compose file
-
-    Let's write a Compose file for the wordsmith app!
-
-    The code is at: https://github.com/jpetazzo/wordsmith
-
+  - containers/Exercise_Composefile.md
 - - containers/Docker_Machine.md
   - containers/Advanced_Dockerfiles.md
   - containers/Application_Configuration.md
@@ -30,9 +30,11 @@ chapters:
   - containers/Building_Images_With_Dockerfiles.md
   - containers/Cmd_And_Entrypoint.md
   - containers/Copying_Files_During_Build.md
+  - containers/Exercise_Dockerfile_Basic.md
 - - containers/Multi_Stage_Builds.md
   - containers/Publishing_To_Docker_Hub.md
   - containers/Dockerfile_Tips.md
+  - containers/Exercise_Dockerfile_Advanced.md
 - - containers/Naming_And_Inspecting.md
   - containers/Labels.md
   - containers/Getting_Inside.md
@@ -45,6 +47,7 @@ chapters:
   - containers/Windows_Containers.md
   - containers/Working_With_Volumes.md
   - containers/Compose_For_Dev_Stacks.md
+  - containers/Exercise_Composefile.md
   - containers/Docker_Machine.md
 - - containers/Advanced_Dockerfiles.md
   - containers/Application_Configuration.md
@@ -407,7 +407,7 @@ class: extra-details
 
 - We are going to create a service account
 
-- We will use an existing cluster role (`view`)
+- We will use a default cluster role (`view`)
 
 - We will bind together this role and this service account
@@ -574,6 +574,51 @@ It's important to note a couple of details in these flags ...
 
 class: extra-details
 
+## Where does this `view` role come from?
+
+- Kubernetes defines a number of ClusterRoles intended to be bound to users
+
+- `cluster-admin` can do *everything* (think `root` on UNIX)
+
+- `admin` can do *almost everything* (except e.g. changing resource quotas and limits)
+
+- `edit` is similar to `admin`, but cannot view or edit permissions
+
+- `view` has read-only access to most resources, except permissions and secrets
+
+*In many situations, these roles will be all you need.*
+
+*You can also customize them if needed!*
+
+---
+
+class: extra-details
+
+## Customizing the default roles
+
+- If you need to *add* permissions to these default roles (or others),
+  <br/>
+  you can do it through the [ClusterRole Aggregation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) mechanism
+
+- This happens by creating a ClusterRole with the following labels:
+  ```yaml
+  metadata:
+    labels:
+      rbac.authorization.k8s.io/aggregate-to-admin: "true"
+      rbac.authorization.k8s.io/aggregate-to-edit: "true"
+      rbac.authorization.k8s.io/aggregate-to-view: "true"
+  ```
+
+- This ClusterRole's permissions will be added to `admin`/`edit`/`view` respectively
+
+- This is particularly useful when using CustomResourceDefinitions
+
+  (since Kubernetes cannot guess which resources are sensitive and which ones aren't)
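As an illustration, an aggregated ClusterRole could look like the sketch below (the `example.com` API group and `things` resource are hypothetical, standing in for a CRD you might have installed):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: view-things
  labels:
    # merge these permissions into the built-in "view" role
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [ "example.com" ]
  resources: [ "things" ]
  verbs: [ "get", "list", "watch" ]
```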
+
+---
+
 class: extra-details
 
 ## Where do our permissions come from?
 
 - When interacting with the Kubernetes API, we are using a client certificate
@@ -166,7 +166,7 @@
 
 - Upgrade kubelet:
   ```bash
-  apt install kubelet=1.14.1-00
+  apt install kubelet=1.14.2-00
   ```
 
 ]
@@ -267,7 +267,7 @@
 
 - Perform the upgrade:
   ```bash
-  sudo kubeadm upgrade apply v1.14.1
+  sudo kubeadm upgrade apply v1.14.2
   ```
 
 ]
@@ -287,8 +287,8 @@
 - Download the configuration on each node, and upgrade kubelet:
   ```bash
   for N in 1 2 3; do
-    ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.1
-    ssh node$N sudo apt install kubelet=1.14.1-00
+    ssh node$N sudo kubeadm upgrade node config --kubelet-version v1.14.2
+    ssh node$N sudo apt install kubelet=1.14.2-00
   done
   ```
 ]
@@ -297,7 +297,7 @@
 
 ## Checking what we've done
 
-- All our nodes should now be updated to version 1.14.1
+- All our nodes should now be updated to version 1.14.2
 
 .exercise[
@@ -69,3 +69,46 @@
 
 `Error: release loitering-otter failed: services "hasher" already exists`
 
+- To avoid naming conflicts, we will deploy the application in another *namespace*
+
+---
+
+## Switching to another namespace
+
+- We can create a new namespace and switch to it
+
+  (Helm will automatically use the namespace specified in our context)
+
+- We can also tell Helm which namespace to use
+
+.exercise[
+
+- Tell Helm to use a specific namespace:
+  ```bash
+  helm install dockercoins --namespace=magenta
+  ```
+
+]
+
+---
+
+## Checking our new copy of DockerCoins
+
+- We can check the worker logs, or the web UI
+
+.exercise[
+
+- Retrieve the NodePort number of the web UI:
+  ```bash
+  kubectl get service webui --namespace=magenta
+  ```
+
+- Open it in a web browser
+
+- Look at the worker logs:
+  ```bash
+  kubectl logs deploy/worker --tail=10 --follow --namespace=magenta
+  ```
+
+]
+
+Note: it might take a minute or two for the worker to start.
@@ -61,7 +61,7 @@ There are many possibilities!
 
   - creates a new custom type, `Remote`, exposing a git+ssh server
 
-  - deploy by pushing YAML or Helm Charts to that remote
+  - deploy by pushing YAML or Helm charts to that remote
 
 - Replacing built-in types with CRDs
@@ -87,7 +87,11 @@ There are many possibilities!
 
   (and take action when they are created/updated)
 
-  *Example: [YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml)*
+  *
+  Examples:
+  [YAML to install the gitkube CRD](https://storage.googleapis.com/gitkube/gitkube-setup-stable.yaml),
+  [YAML to install a redis operator CRD](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml)
+  *
 
 ---
@@ -234,6 +234,6 @@
 
   (see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)
 
-- Gitkube can also deploy Helm Charts
+- Gitkube can also deploy Helm charts
 
   (instead of raw YAML files)
@@ -276,3 +276,21 @@ error: the server doesn't have a resource type "endpoint"
 
 - There is no `endpoint` object: `type Endpoints struct`
 
 - The type doesn't represent a single endpoint, but a list of endpoints
+
+---
+
+## Exposing services to the outside world
+
+- The default type (ClusterIP) only works for internal traffic
+
+- If we want to accept external traffic, we can use one of these:
+
+  - NodePort (expose a service on a TCP port between 30000-32768)
+
+  - LoadBalancer (provision a cloud load balancer for our service)
+
+  - ExternalIP (use one node's external IP address)
+
+  - Ingress (a special mechanism for HTTP services)
+
+*We'll see NodePorts and Ingresses more in detail later.*
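As an illustration of the NodePort option, a minimal service of that type could be declared as follows (a sketch; the `blue` name, selector, and port numbers are made up):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: blue
spec:
  type: NodePort
  selector:
    app: blue
  ports:
  - port: 80         # port exposed inside the cluster
    nodePort: 30080  # port opened on every node (must be in the NodePort range)
```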
@@ -104,7 +104,7 @@
 
 ## Introspection vs. documentation
 
-- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/)
+- We can access the same information by reading the [API documentation](https://kubernetes.io/docs/reference/#api-reference)
 
 - The API documentation is usually easier to read, but:
@@ -14,15 +14,15 @@
 
 ## Differences with Helm
 
-- Helm Charts use placeholders `{{ like.this }}`
+- Helm charts use placeholders `{{ like.this }}`
 
 - Kustomize "bases" are standard Kubernetes YAML
 
 - It is possible to use an existing set of YAML as a Kustomize base
 
-- As a result, writing a Helm Chart is more work ...
+- As a result, writing a Helm chart is more work ...
 
-- ... But Helm Charts are also more powerful; e.g. they can:
+- ... But Helm charts are also more powerful; e.g. they can:
 
   - use flags to conditionally include resources or blocks
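To make the comparison concrete: a Kustomize base is just the existing YAML plus a `kustomization.yaml` listing it, along the lines of this sketch (the file names are hypothetical):

```yaml
# kustomization.yaml
resources:
- deployment.yaml
- service.yaml
```

An overlay can then reference that base and patch it, without any placeholders in the base files themselves.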
@@ -88,11 +88,11 @@
 
 - Change to a new directory:
   ```bash
-  mkdir ~/kubercoins
-  cd ~/kubercoins
+  mkdir ~/kustomcoins
+  cd ~/kustomcoins
   ```
 
-- Run `ship init` with the kubercoins repository:
+- Run `ship init` with the kustomcoins repository:
   ```bash
   ship init https://github.com/jpetazzo/kubercoins
   ```
@@ -146,3 +146,49 @@
 
 - We will create a new copy of DockerCoins in another namespace
 
+---
+
+## Deploy DockerCoins with Kustomize
+
+.exercise[
+
+- Create a new namespace:
+  ```bash
+  kubectl create namespace kustomcoins
+  ```
+
+- Deploy DockerCoins:
+  ```bash
+  kubectl apply -f rendered.yaml --namespace=kustomcoins
+  ```
+
+- Or, with Kubernetes 1.14, you can also do this:
+  ```bash
+  kubectl apply -k overlays/ship --namespace=kustomcoins
+  ```
+
+]
+
+---
+
+## Checking our new copy of DockerCoins
+
+- We can check the worker logs, or the web UI
+
+.exercise[
+
+- Retrieve the NodePort number of the web UI:
+  ```bash
+  kubectl get service webui --namespace=kustomcoins
+  ```
+
+- Open it in a web browser
+
+- Look at the worker logs:
+  ```bash
+  kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
+  ```
+
+]
+
+Note: it might take a minute or two for the worker to start.
@@ -120,7 +120,7 @@
 
 - Team "build" ships ready-to-run manifests
 
-  (YAML, Helm Charts, Kustomize ...)
+  (YAML, Helm charts, Kustomize ...)
 
 - Team "run" adjusts some parameters and monitors the application
slides/k8s/local-persistent-volumes.md (new file)
@@ -0,0 +1,244 @@
# Local Persistent Volumes

- We want to run that Consul cluster *and* actually persist data

- But we don't have a distributed storage system

- We are going to use local volumes instead

  (similar conceptually to `hostPath` volumes)

- We can use local volumes without installing extra plugins

- However, they are tied to a node

- If that node goes down, the volume becomes unavailable

---

## With or without dynamic provisioning

- We will deploy a Consul cluster *with* persistence

- That cluster's StatefulSet will create PVCs

- These PVCs will remain unbound¹ until we create local volumes manually

  (we will basically do the job of the dynamic provisioner)

- Then, we will see how to automate that with a dynamic provisioner

.footnote[¹Unbound = without an associated Persistent Volume.]

---

## Work in a separate namespace

- To avoid conflicts with existing resources, let's create and use a new namespace

.exercise[

- Create a new namespace:
  ```bash
  kubectl create namespace orange
  ```

- Switch to that namespace:
  ```bash
  kns orange
  ```

]

.warning[Make sure to call that namespace `orange`, because that name is hardcoded in the YAML files.]

---

## Deploying Consul

- We will use a slightly different YAML file

- The only differences between that file and the previous one are:

  - the `volumeClaimTemplate` defined in the StatefulSet spec

  - the corresponding `volumeMounts` in the Pod spec

  - the namespace `orange` used for discovery of Pods

.exercise[

- Apply the persistent Consul YAML file:
  ```bash
  kubectl apply -f ~/container.training/k8s/persistent-consul.yaml
  ```

]

---

## Observing the situation

- Let's look at Persistent Volume Claims and Pods

.exercise[

- Check that we now have an unbound Persistent Volume Claim:
  ```bash
  kubectl get pvc
  ```

- We don't have any Persistent Volumes:
  ```bash
  kubectl get pv
  ```

- The Pod `consul-0` is not scheduled yet:
  ```bash
  kubectl get pods -o wide
  ```

]

*Hint: leave these commands running with `-w` in different windows.*

---

## Explanations

- In a StatefulSet, the Pods are started one by one

- `consul-1` won't be created until `consul-0` is running

- `consul-0` has a dependency on an unbound Persistent Volume Claim

- The scheduler won't schedule the Pod until the PVC is bound

  (because the PVC might be bound to a volume that is only available on a subset of nodes; for instance EBS volumes are tied to an availability zone)

---

## Creating Persistent Volumes

- Let's create 3 local directories (`/mnt/consul`) on node2, node3, node4

- Then create 3 Persistent Volumes corresponding to these directories

.exercise[

- Create the local directories:
  ```bash
  for NODE in node2 node3 node4; do
    ssh $NODE sudo mkdir -p /mnt/consul
  done
  ```

- Create the PV objects:
  ```bash
  kubectl apply -f ~/container.training/k8s/volumes-for-consul.yaml
  ```

]

---

## Check our Consul cluster

- The PVs that we created will be automatically matched with the PVCs

- Once a PVC is bound, its Pod can start normally

- Once the Pod `consul-0` has started, `consul-1` can be created, etc.

- Eventually, our Consul cluster is up, and backed by "persistent" volumes

.exercise[

- Check that our Consul cluster has 3 members indeed:
  ```bash
  kubectl exec consul-0 consul members
  ```

]

---

## Devil is in the details (1/2)

- The size of the Persistent Volumes is bogus

  (it is used when matching PVs and PVCs together, but there is no actual quota or limit)

---

## Devil is in the details (2/2)

- This specific example worked because we had exactly 1 free PV per node:

  - if we had created multiple PVs per node ...

  - we could have ended up with two PVCs bound to PVs on the same node ...

  - which would have required two Pods to be on the same node ...

  - which is forbidden by the anti-affinity constraints in the StatefulSet

- To avoid that, we need to associate the PVs with a Storage Class that has:
  ```yaml
  volumeBindingMode: WaitForFirstConsumer
  ```
  (this means that a PVC will be bound to a PV only after being used by a Pod)

- See [this blog post](https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/) for more details
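The Storage Class itself is tiny; the sketch below follows the one in the blog post linked above (the `local-storage` name is arbitrary, and `kubernetes.io/no-provisioner` means the volumes are created by hand):

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
```

Each PV and PVC would then opt into it with `storageClassName: local-storage`.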
---

## Bulk provisioning

- It's not practical to manually create directories and PVs for each app

- We *could* pre-provision a number of PVs across our fleet

- We could even automate that with a Daemon Set:

  - creating a number of directories on each node

  - creating the corresponding PV objects

- We also need to recycle volumes

- ... This can quickly get out of hand

---

## Dynamic provisioning

- We could also write our own provisioner, which would:

  - watch the PVCs across all namespaces

  - when a PVC is created, create a corresponding PV on a node

- Or we could use one of the dynamic provisioners for local persistent volumes

  (for instance the [Rancher local path provisioner](https://github.com/rancher/local-path-provisioner))
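Installing such a provisioner typically boils down to applying its manifest; for the Rancher project mentioned above, something like the following (the exact URL is an assumption; check the project's README for the current one):

```bash
# Deploys the provisioner and a "local-path" StorageClass
kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
```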
---

## Strategies for local persistent volumes

- Remember, when a node goes down, the volumes on that node become unavailable

- High availability will require another layer of replication

  (like what we've just seen with Consul; or primary/secondary; etc.)

- Pre-provisioning PVs makes sense for machines with local storage

  (e.g. cloud instance storage; or storage directly attached to a physical machine)

- Dynamic provisioning makes sense for large numbers of applications

  (when we can't or won't dedicate a whole disk to a volume)

- It's possible to mix both (using distinct Storage Classes)
@@ -6,6 +6,24 @@
 
 ---
 
+## Requirements
+
+.warning[The exercises in this chapter should be done *on your local machine*.]
+
+- `kubectl` is officially available on Linux, macOS, Windows
+
+  (and unofficially anywhere we can build and run Go binaries)
+
+- You may skip these exercises if you are following along from:
+
+  - a tablet or phone
+
+  - a web-based terminal
+
+  - an environment where you can't install and run new binaries
+
+---
+
 ## Installing `kubectl`
 
 - If you already have `kubectl` on your local machine, you can skip this
@@ -16,11 +34,11 @@
 
 - Download the `kubectl` binary from one of these links:
 
-  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl)
+  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
   |
-  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/darwin/amd64/kubectl)
+  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
   |
-  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/windows/amd64/kubectl.exe)
+  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
 
 - On Linux and macOS, make the binary executable with `chmod +x kubectl`
@@ -65,9 +83,16 @@ Platform:"linux/amd64"}
 
 - If you never used `kubectl` on your machine before: nothing to do!
 
 - If you already used `kubectl` to control a Kubernetes cluster before:
 
-  rename `~/.kube/config` to e.g. `~/.kube/config.bak`
+.exercise[
+
+- Make a copy of `~/.kube/config`; if you are using macOS or Linux, you can do:
+  ```bash
+  cp ~/.kube/config ~/.kube/config.before.training
+  ```
+
+- If you are using Windows, you will need to adapt this command
+
+]
 
 ---
@@ -1,26 +1,65 @@
 # Namespaces
 
 - We would like to deploy another copy of DockerCoins on our cluster
 
 - We could rename all our deployments and services:
 
   hasher → hasher2, redis → redis2, rng → rng2, etc.
 
 - That would require updating the code
 
 - There has to be a better way!
 
 --
 
 - As hinted by the title of this section, we will use *namespaces*
 
 ---
 
 ## Identifying a resource
 
 - We cannot have two resources with the same name
 
-  (Or can we...?)
+  (or can we...?)
 
 --
 
-- We cannot have two resources *of the same type* with the same name
+- We cannot have two resources *of the same kind* with the same name
 
-  (But it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set!)
+  (but it's OK to have a `rng` service, a `rng` deployment, and a `rng` daemon set)
 
 --
 
-- We cannot have two resources of the same type with the same name *in the same namespace*
+- We cannot have two resources of the same kind with the same name *in the same namespace*
 
-  (But it's OK to have e.g. two `rng` services in different namespaces!)
+  (but it's OK to have e.g. two `rng` services in different namespaces)
 
 --
 
-- In other words: **the tuple *(type, name, namespace)* needs to be unique**
+- Except for resources that exist at the *cluster scope*
 
-  (In the resource YAML, the type is called `Kind`)
+  (these do not belong to a namespace)
 
+---
+
+## Uniquely identifying a resource
+
+- For *namespaced* resources:
+
+  the tuple *(kind, name, namespace)* needs to be unique
+
+- For resources at the *cluster scope*:
+
+  the tuple *(kind, name)* needs to be unique
+
+.exercise[
+
+- List resource types again, and check the NAMESPACED column:
+  ```bash
+  kubectl api-resources
+  ```
+
+]
 
 ---
@@ -59,7 +98,7 @@
 
 - The two methods above are identical
 
-- If we are using a tool like Helm, it will create namespaces automatically
+- Some tools like Helm will create namespaces automatically when needed
 
 ---
@@ -168,41 +207,27 @@
 
 ---
 
-## Deploy DockerCoins with Helm
+## Deploying DockerCoins with YAML files
 
-*Follow these instructions if you previously created a Helm Chart.*
+- The GitHub repository `jpetazzo/kubercoins` contains everything we need!
 
 .exercise[
 
-- Deploy DockerCoins:
+- Clone the kubercoins repository:
   ```bash
-  helm install dockercoins
+  git clone https://github.com/jpetazzo/kubercoins
   ```
 
+- Create all the DockerCoins resources:
+  ```bash
+  kubectl create -f kubercoins
+  ```
+
 ]
 
-In the last command line, `dockercoins` is just the local path where
-we created our Helm chart before.
+If the argument behind `-f` is a directory, all the files in that directory are processed.
 
----
-
-## Deploy DockerCoins with Kustomize
-
-*Follow these instructions if you previously created a Kustomize overlay.*
-
-.exercise[
-
-- Deploy DockerCoins:
-  ```bash
-  kubectl apply -f rendered.yaml
-  ```
-
-- Or, with Kubernetes 1.14, you can also do this:
-  ```bash
-  kubectl apply -k overlays/ship
-  ```
-
-]
+The subdirectories are *not* processed, unless we also add the `-R` flag.
 
 ---
@@ -221,46 +246,7 @@ we created our Helm chart before.
 
 ]
 
-If the graph shows up but stays at zero, check the next slide!
-
----
-
-## Troubleshooting
-
-If you did the exercises from the chapter about labels and selectors,
-the app that you just created may not work, because the `rng` service
-selector has `enabled=yes` but the pods created by the `rng` daemon set
-do not have that label.
-
-How can we troubleshoot that?
-
-- Query individual services manually
-
-  → the `rng` service will time out
-
-- Inspect the services with `kubectl describe service`
-
-  → the `rng` service will have an empty list of backends
-
----
-
-## Fixing the broken service
-
-The easiest option is to add the `enabled=yes` label to the relevant pods.
-
-.exercise[
-
-- Add the `enabled` label to the pods of the `rng` daemon set:
-  ```bash
-  kubectl label pods -l app=rng enabled=yes
-  ```
-
-]
-
-The *best* option is to change either the service definition, or the
-daemon set definition, so that their respective selectors match correctly.
-
-*This is left as an exercise for the reader!*
+If the graph shows up but stays at zero, give it a minute or two!
 
 ---
@@ -12,7 +12,7 @@
 
 - an *alert manager* to notify us according to metrics values or trends
 
-- We are going to deploy it on our Kubernetes cluster and see how to query it
+- We are going to use it to collect and query some metrics on our Kubernetes cluster
 
 ---
@@ -145,7 +145,28 @@ scrape_configs:
 
   (it will even be gentler on the I/O subsystem since it needs to write less)
 
-[Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
+- Would you like to know more? Check this video:
+
+  [Storage in Prometheus 2.0](https://www.youtube.com/watch?v=C4YV-9CrawA) by [Goutham V](https://twitter.com/putadent) at DC17EU
+
+---
+
+## Checking if Prometheus is installed
+
+- Before trying to install Prometheus, let's check if it's already there
+
+.exercise[
+
+- Look for services with a label `app=prometheus` across all namespaces:
+  ```bash
+  kubectl get services --selector=app=prometheus --all-namespaces
+  ```
+
+]
+
+If we see a `NodePort` service called `prometheus-server`, we're good!
+
+(We can then skip to "Connecting to the Prometheus web UI".)
 
 ---
@@ -169,11 +190,11 @@ We need to:
 
 ---
 
-## Helm Charts to the rescue
+## Helm charts to the rescue
 
-- To make our lives easier, we are going to use a Helm Chart
+- To make our lives easier, we are going to use a Helm chart
 
-- The Helm Chart will take care of all the steps explained above
+- The Helm chart will take care of all the steps explained above
 
   (including some extra features that we don't need, but won't hurt)
@@ -210,20 +231,41 @@ We need to:
 
 - Install Prometheus on our cluster:
   ```bash
-  helm install stable/prometheus \
-       --set server.service.type=NodePort \
-       --set server.persistentVolume.enabled=false
+  helm upgrade prometheus stable/prometheus \
+       --install \
+       --namespace kube-system \
+       --set server.service.type=NodePort \
+       --set server.service.nodePort=30090 \
+       --set server.persistentVolume.enabled=false \
+       --set alertmanager.enabled=false
   ```
 
 ]
 
-The provided flags:
+Curious about all these flags? They're explained in the next slide.
 
-- expose the server web UI (and API) on a NodePort
+---
 
-- use an ephemeral volume for metrics storage
-  <br/>
-  (instead of requesting a Persistent Volume through a Persistent Volume Claim)
+class: extra-details
+
+## Explaining all the Helm flags
+
+- `helm upgrade prometheus` → upgrade release "prometheus" to the latest version ...
+
+  (a "release" is a unique name given to an app deployed with Helm)
+
+- `stable/prometheus` → ... of the chart `prometheus` in repo `stable`
+
+- `--install` → if the app doesn't exist, create it
+
+- `--namespace kube-system` → put it in that specific namespace
+
+- And set the following *values* when rendering the chart's templates:
+
+  - `server.service.type=NodePort` → expose the Prometheus server with a NodePort
+
+  - `server.service.nodePort=30090` → set the specific NodePort number to use
+
+  - `server.persistentVolume.enabled=false` → do not use a PersistentVolumeClaim
+
+  - `alertmanager.enabled=false` → disable the alert manager entirely
 
 ---
@@ -235,7 +277,7 @@ The provided flags:
 
 - Figure out the NodePort that was allocated to the Prometheus server:
   ```bash
-  kubectl get svc | grep prometheus-server
+  kubectl get svc --all-namespaces | grep prometheus-server
   ```
 
 - With your browser, connect to that port
@@ -292,7 +334,7 @@ This query will show us CPU usage across all containers:
 
 container_cpu_usage_seconds_total
 ```
 
-  - The suffix of the metrics name tells us:
+- The suffix of the metrics name tells us:
 
   - the unit (seconds of CPU)
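Since this metric is a counter, it is usually wrapped in `rate()` to get per-second CPU usage; for instance (a standard PromQL idiom, not specific to this chart):

```
# Per-container CPU usage, in cores, averaged over the last 5 minutes
rate(container_cpu_usage_seconds_total[5m])
```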
@@ -486,3 +528,21 @@ class: extra-details
 
 - see [this comment](https://github.com/prometheus/prometheus/issues/2204#issuecomment-261515520) for an overview
 
 - or [this blog post](https://5pi.de/2017/11/09/use-prometheus-vector-matching-to-get-kubernetes-utilization-across-any-pod-label/) for a complete description of the process
+
+---
+
+## In practice
+
+- Grafana is a beautiful (and useful) frontend to display all kinds of graphs
+
+- Not everyone needs to know Prometheus, PromQL, Grafana, etc.
+
+- But in a team, it is valuable to have at least one person who knows them
+
+- That person can set up queries and dashboards for the rest of the team
+
+- It's a little bit like knowing how to optimize SQL queries, Dockerfiles ...
+
+  Don't panic if you don't know these tools!
+
+  ... But make sure at least one person in your team is on it 💯
@@ -34,13 +34,13 @@
 
 - Each pod can discover the IP address of the others easily
 
-- The pods can have persistent volumes attached to them
+- The pods can persist data on attached volumes
 
 🤔 Wait a minute ... Can't we already attach volumes to pods and deployments?
 
 ---
 
-## Volumes and Persistent Volumes
+## Revisiting volumes
 
 - [Volumes](https://kubernetes.io/docs/concepts/storage/volumes/) are used for many purposes:
@@ -50,13 +50,13 @@
 
   - accessing storage systems
 
-- The last type of volumes is known as a "Persistent Volume"
+- Let's see examples of the latter usage
 
 ---
 
-## Persistent Volumes types
+## Volumes types
 
-- There are many [types of Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) available:
+- There are many [types of volumes](https://kubernetes.io/docs/concepts/storage/volumes/#types-of-volumes) available:
 
   - public cloud storage (GCEPersistentDisk, AWSElasticBlockStore, AzureDisk...)
@@ -74,7 +74,7 @@
 
 ---
 
-## Using a Persistent Volume
+## Using a cloud volume
 
 Here is a pod definition using an AWS EBS volume (that has to be created first):
@@ -99,7 +99,32 @@ spec:
 
 ---
 
-## Shortcomings of Persistent Volumes
+## Using an NFS volume
+
+Here is another example using a volume on an NFS server:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-using-my-nfs-volume
+spec:
+  containers:
+  - image: ...
+    name: container-using-my-nfs-volume
+    volumeMounts:
+    - mountPath: /my-nfs
+      name: my-nfs-volume
+  volumes:
+  - name: my-nfs-volume
+    nfs:
+      server: 192.168.0.55
+      path: "/exports/assets"
+```
+
+---
+
+## Shortcomings of volumes
 
 - Their lifecycle (creation, deletion...) is managed outside of the Kubernetes API
@@ -125,17 +150,47 @@ spec:
 
 - This type is a *Persistent Volume Claim*
 
 - A Persistent Volume Claim (PVC) is a resource type
 
   (visible with `kubectl get persistentvolumeclaims` or `kubectl get pvc`)
 
+- A PVC is not a volume; it is a *request for a volume*
+
 ---
 
 ## Persistent Volume Claims in practice
 
 - Using a Persistent Volume Claim is a two-step process:
 
   - creating the claim
 
   - using the claim in a pod (as if it were any other kind of volume)
 
-- Between these two steps, something will happen behind the scenes:
+- A PVC starts by being Unbound (without an associated volume)
 
-  - Kubernetes will associate an existing volume with the claim
+- Once it is associated with a Persistent Volume, it becomes Bound
 
-  - ... or dynamically create a volume if possible and necessary
+- A Pod referring to an unbound PVC will not start
 
+  (but as soon as the PVC is bound, the Pod can start)
+
+---
+
+## Binding PV and PVC
+
+- A Kubernetes controller continuously watches PV and PVC objects
+
+- When it notices an unbound PVC, it tries to find a satisfactory PV
+
+  ("satisfactory" in terms of size and other characteristics; see next slide)
+
+- If no PV fits the PVC, a PV can be created dynamically
+
+  (this requires a *dynamic provisioner* to be configured; more on that later)
+
+- Otherwise, the PVC remains unbound indefinitely
+
+  (until we manually create a PV or set up dynamic provisioning)
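For reference, the claim itself is a small object; here is a sketch (the name and size are arbitrary):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim
spec:
  accessModes:
  - ReadWriteOnce      # read-write by a single node
  resources:
    requests:
      storage: 1Gi     # the controller looks for a PV at least this big
```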
---
 
@@ -147,7 +202,9 @@ spec:
 
 - the access mode (e.g. "read-write by a single pod")
 
-- It can also give extra details, like:
+- Optionally, it can also specify a Storage Class
+
+- The Storage Class indicates:
 
   - which storage system to use (e.g. Portworx, EBS...)
@@ -155,8 +212,6 @@ spec:
 
   e.g.: "replicate the data 3 times, and use SSD media"
 
-- The extra details are provided by specifying a Storage Class
-
 ---
 
 ## What's a Storage Class?
@@ -167,15 +222,15 @@ spec:
 
 - It indicates which *provisioner* to use
 
   (which controller will create the actual volume)
 
 - And arbitrary parameters for that provisioner
 
   (replication levels, type of disk ... anything relevant!)
 
-- It is necessary to define a Storage Class to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
+- Storage Classes are required if we want to use [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)
 
-- Conversely, it is not necessary to define one if you will create volumes manually
-
-  (we will see dynamic provisioning in action later)
+  (but we can also create volumes manually, and ignore Storage Classes)
 
 ---
@@ -200,7 +255,7 @@ spec:
 
 ## Using a Persistent Volume Claim
 
-Here is the same definition as earlier, but using a PVC:
+Here is a Pod definition like the ones shown earlier, but using a PVC:
 
 ```yaml
 apiVersion: v1
@@ -212,7 +267,7 @@ spec:
   - image: ...
     name: container-using-a-claim
     volumeMounts:
-    - mountPath: /my-ebs
+    - mountPath: /my-vol
      name: my-volume
   volumes:
   - name: my-volume
@@ -1,7 +1,7 @@
 ## Versions installed
 
-- Kubernetes 1.14.1
-- Docker Engine 18.09.5
+- Kubernetes 1.14.2
+- Docker Engine 18.09.6
 - Docker Compose 1.21.1
 
 <!-- ##VERSION## -->
@@ -18,6 +18,8 @@
 
 ---
 
+class: extra-details
+
 ## Kubernetes volumes vs. Docker volumes
 
 - Kubernetes and Docker volumes are very similar
@@ -35,13 +37,35 @@
 - Kubernetes volumes are also used to expose configuration and secrets
 
 - Docker has specific concepts for configuration and secrets
 
+  <br/>
   (but under the hood, the technical implementation is similar)
 
+- If you're not familiar with Docker volumes, you can safely ignore this slide!
+
+---
+
+## Volumes ≠ Persistent Volumes
+
+- Volumes and Persistent Volumes are related, but very different!
+
+- *Volumes*:
+
+  - appear in Pod specifications (see next slide)
+
+  - do not exist as API resources (**cannot** do `kubectl get volumes`)
+
+- *Persistent Volumes*:
+
+  - are API resources (**can** do `kubectl get persistentvolumes`)
+
+  - correspond to concrete volumes (e.g. on a SAN, EBS, etc.)
+
+  - cannot be associated with a Pod directly; but through a Persistent Volume Claim
+
+  - won't be discussed further in this section
+
 ## A simple volume example
 
 ```yaml
@@ -132,6 +132,8 @@ And *then* it is time to look at orchestration!
 
   [Persistent Volumes](kube-selfpaced.yml.html#toc-highly-available-persistent-volumes)
 
+- Excellent [blog post](http://www.databasesoup.com/2018/07/should-i-run-postgres-on-kubernetes.html) tackling the question: “Should I run Postgres on Kubernetes?”
+
 ---
 
 ## HTTP traffic handling
@@ -20,54 +20,57 @@ chapters:
   - shared/about-slides.md
   - shared/toc.md
 - - shared/prereqs.md
+  - shared/connecting.md
   - k8s/versions-k8s.md
   - shared/sampleapp.md
-  # - shared/composescale.md
-  # - shared/hastyconclusions.md
+  #- shared/composescale.md
+  #- shared/hastyconclusions.md
   - shared/composedown.md
   - k8s/concepts-k8s.md
   - shared/declarative.md
   - k8s/declarative.md
-- - k8s/kubenet.md
-  - k8s/kubectlget.md
+  - k8s/kubenet.md
+- - k8s/kubectlget.md
   - k8s/setup-k8s.md
   - k8s/kubectlrun.md
   - k8s/deploymentslideshow.md
   - k8s/kubectlexpose.md
 - - k8s/shippingimages.md
-  # - k8s/buildshiprun-selfhosted.md
+  #- k8s/buildshiprun-selfhosted.md
   - k8s/buildshiprun-dockerhub.md
   - k8s/ourapponkube.md
-  # - k8s/kubectlproxy.md
-  # - k8s/localkubeconfig.md
-  # - k8s/accessinternal.md
+  #- k8s/kubectlproxy.md
+  #- k8s/localkubeconfig.md
+  #- k8s/accessinternal.md
   - k8s/dashboard.md
-  # - k8s/kubectlscale.md
+  #- k8s/kubectlscale.md
   - k8s/scalingdockercoins.md
   - shared/hastyconclusions.md
   - k8s/daemonset.md
 - - k8s/rollout.md
-  # - k8s/healthchecks.md
+  - k8s/namespaces.md
+  #- k8s/kustomize.md
+  #- k8s/helm.md
+  #- k8s/create-chart.md
+  #- k8s/healthchecks.md
   - k8s/logs-cli.md
   - k8s/logs-centralized.md
-#- - k8s/helm.md
-#  - k8s/create-chart.md
-#  - k8s/kustomize.md
-#  - k8s/namespaces.md
-#  - k8s/netpol.md
-#  - k8s/authn-authz.md
-#  - k8s/podsecuritypolicy.md
-#- - k8s/ingress.md
-#  - k8s/gitworkflows.md
+  #- k8s/netpol.md
+  #- k8s/authn-authz.md
+  #- k8s/podsecuritypolicy.md
+  #- k8s/ingress.md
+  #- k8s/gitworkflows.md
   - k8s/prometheus.md
-#- - k8s/volumes.md
-#  - k8s/build-with-docker.md
-#  - k8s/build-with-kaniko.md
-#  - k8s/configuration.md
-#- - k8s/owners-and-dependents.md
-#  - k8s/extending-api.md
-#  - k8s/statefulsets.md
-#  - k8s/portworx.md
+  #- k8s/volumes.md
+  #- k8s/build-with-docker.md
+  #- k8s/build-with-kaniko.md
+  #- k8s/configuration.md
+  #- k8s/owners-and-dependents.md
+  #- k8s/extending-api.md
+  #- k8s/statefulsets.md
+  #- k8s/local-persistent-volumes.md
+  #- k8s/portworx.md
+  #- k8s/staticpods.md
 - - k8s/whatsnext.md
   - k8s/links.md
   - shared/thankyou.md
@@ -22,6 +22,7 @@ chapters:
 - shared/about-slides.md
 - shared/toc.md
 - - shared/prereqs.md
+  - shared/connecting.md
   - k8s/versions-k8s.md
   - shared/sampleapp.md
 # Bridget doesn't go into as much depth with compose
@@ -53,10 +54,10 @@ chapters:
 - - k8s/logs-cli.md
 # Bridget hasn't added EFK yet
 #- k8s/logs-centralized.md
-  - k8s/namespaces.md
   - k8s/helm.md
   - k8s/create-chart.md
   #- k8s/kustomize.md
+  - k8s/namespaces.md
   #- k8s/netpol.md
   - k8s/whatsnext.md
 # - k8s/links.md
@@ -20,6 +20,7 @@ chapters:
 - shared/about-slides.md
 - shared/toc.md
 - - shared/prereqs.md
+  - shared/connecting.md
   - k8s/versions-k8s.md
   - shared/sampleapp.md
   - shared/composescale.md
@@ -46,14 +47,14 @@ chapters:
 #  - k8s/scalingdockercoins.md
 #  - shared/hastyconclusions.md
   - k8s/daemonset.md
-- - k8s/rollout.md
+  - k8s/rollout.md
-- - k8s/namespaces.md
-  - k8s/kustomize.md
-  - k8s/helm.md
-  - k8s/create-chart.md
-  - k8s/healthchecks.md
+- - k8s/healthchecks.md
   - k8s/logs-cli.md
   - k8s/logs-centralized.md
+- - k8s/helm.md
+  #- k8s/create-chart.md
+  - k8s/kustomize.md
+  - k8s/namespaces.md
   - k8s/netpol.md
   - k8s/authn-authz.md
   - k8s/podsecuritypolicy.md
@@ -67,6 +68,7 @@ chapters:
 - - k8s/owners-and-dependents.md
   - k8s/extending-api.md
   - k8s/statefulsets.md
+  - k8s/local-persistent-volumes.md
   - k8s/portworx.md
   - k8s/staticpods.md
 - - k8s/whatsnext.md
@@ -20,6 +20,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
@@ -28,8 +29,8 @@ chapters:
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- - k8s/kubenet.md
- k8s/kubectlget.md
- k8s/kubenet.md
- - k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- k8s/deploymentslideshow.md
@@ -47,14 +48,14 @@ chapters:
- shared/hastyconclusions.md
- - k8s/daemonset.md
- k8s/rollout.md
- k8s/healthchecks.md
- k8s/namespaces.md
- k8s/kustomize.md
#- k8s/helm.md
#- k8s/create-chart.md
- - k8s/healthchecks.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- - k8s/helm.md
#- k8s/create-chart.md
- k8s/kustomize.md
- k8s/namespaces.md
- k8s/netpol.md
#- k8s/netpol.md
- k8s/authn-authz.md
- k8s/podsecuritypolicy.md
- - k8s/ingress.md
@@ -65,10 +66,11 @@ chapters:
#- k8s/build-with-kaniko.md
- k8s/configuration.md
#- k8s/owners-and-dependents.md
- k8s/extending-api.md
#- k8s/extending-api.md
- - k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
- k8s/staticpods.md
#- k8s/staticpods.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

137
slides/shared/connecting.md
Normal file
@@ -0,0 +1,137 @@
class: in-person

## Connecting to our lab environment

.exercise[

- Log into the first VM (`node1`) with your SSH client

<!--
```bash
# Pre-accept the SSH host key of every node listed in /etc/hosts
# (so that automated runs are not blocked by a confirmation prompt):
for N in $(awk '/\Wnode/{print $2}' /etc/hosts); do
  ssh -o StrictHostKeyChecking=no $N true
done
```

```bash
# If kubectl is available, remove leftovers from a previous run
# (keeping only the kubernetes service and the DNS add-on):
if which kubectl; then
  kubectl get deploy,ds -o name | xargs -rn1 kubectl delete
  kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete --ignore-not-found=true
  kubectl -n kube-system get deploy,svc -o name | grep -v dns | xargs -rn1 kubectl -n kube-system delete
fi
```
-->

- Check that you can SSH (without password) to `node2`:
  ```bash
  ssh node2
  ```
- Type `exit` or `^D` to come back to `node1`

<!-- ```bash exit``` -->

]

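If you prefer a non-interactive version of that check, the variant below may be handy (a sketch; `BatchMode=yes` makes SSH fail instead of prompting for a password):

```bash
# Succeeds quietly if key-based auth works; errors out if a password would be required:
ssh -o BatchMode=yes node2 true && echo "passwordless SSH to node2: OK"
```
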
If anything goes wrong — ask for help!

---

## Doing or re-doing the workshop on your own?

- Use something like
  [Play-With-Docker](http://play-with-docker.com/) or
  [Play-With-Kubernetes](https://training.play-with-kubernetes.com/)

  Zero setup effort; but environments are short-lived and
  might have limited resources

- Create your own cluster (local or cloud VMs)

  Small setup effort; small cost; flexible environments

- Create a bunch of clusters for you and your friends
  ([instructions](https://@@GITREPO@@/tree/master/prepare-vms))

  Bigger setup effort; ideal for group training

---

class: self-paced

## Get your own Docker nodes

- If you already have some Docker nodes: great!

- If not: let's get some, thanks to Play-With-Docker

.exercise[

- Go to http://www.play-with-docker.com/

- Log in

- Create your first node

<!-- ```open http://www.play-with-docker.com/``` -->

]

You will need a Docker ID to use Play-With-Docker.

(Creating a Docker ID is free.)

---

## We will (mostly) interact with node1 only

*These remarks apply only when using multiple nodes, of course.*

- Unless instructed, **all commands must be run from the first VM, `node1`**

- We will only check out/copy the code on `node1`

- During normal operations, we do not need access to the other nodes

- If we had to troubleshoot issues, we would use a combination of the following (see the sketch after this list):

  - SSH (to access system logs, daemon status...)

  - Docker API (to check running containers and container engine status)

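For instance, a first troubleshooting pass on `node2` might combine both, as in this sketch (it assumes the lab VMs run the Docker Engine as the systemd unit `docker`; adjust to your setup):

```bash
# System logs and daemon status, over SSH:
ssh node2 sudo systemctl status docker
ssh node2 sudo journalctl -u docker --since "10 minutes ago"

# Running containers and engine status, through the Docker CLI
# (which queries the Docker API on node2):
ssh node2 docker ps
ssh node2 docker info
```
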
---

## Terminals

Once in a while, the instructions will say:
<br/>"Open a new terminal."

There are multiple ways to do this:

- create a new window or tab on your machine, and SSH into the VM;

- use screen or tmux on the VM and open a new window from there.

You are welcome to use the method that you feel the most comfortable with.

---

## Tmux cheatsheet

[Tmux](https://en.wikipedia.org/wiki/Tmux) is a terminal multiplexer like `screen`.

*You don't have to use it or even know about it to follow along.
<br/>
But some of us like to use it to switch between terminals.
<br/>
It has been preinstalled on your workshop nodes.*

- Ctrl-b c → create a new window
- Ctrl-b n → go to next window
- Ctrl-b p → go to previous window
- Ctrl-b " → split window top/bottom
- Ctrl-b % → split window left/right
- Ctrl-b Alt-1 → rearrange windows in columns
- Ctrl-b Alt-2 → rearrange windows in rows
- Ctrl-b arrows → navigate to other windows
- Ctrl-b d → detach session
- tmux attach → reattach to session

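A typical round trip looks like the sketch below (the session name `lab` is arbitrary):

```bash
# Start a new, named session (easier to find again later):
tmux new-session -s lab

# ...work inside tmux: Ctrl-b c opens a second window, Ctrl-b d detaches...

# Later (even from a brand new SSH connection), reattach to the same session:
tmux attach -t lab
```
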
@@ -169,143 +169,3 @@ class: in-person, extra-details
- It requires UDP ports to be open

  (By default, it uses a UDP port between 60000 and 61000)

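Assuming Mosh is installed on both ends, using it is a drop-in replacement for `ssh` (node name as in this lab environment):

```bash
# Works like ssh, but survives roaming and flaky connections:
mosh node1
```
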
---

@@ -24,6 +24,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md

@@ -24,6 +24,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- shared/sampleapp.md
- shared/composescale.md

@@ -19,6 +19,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- |
  name: part-1

@@ -19,6 +19,7 @@ chapters:
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- shared/connecting.md
- swarm/versions.md
- |
  name: part-1