Compare commits

..

16 Commits

Author SHA1 Message Date
Jerome Petazzoni 08fa37dace fix-redirects.sh: adding forced redirect 2020-04-07 16:57:19 -05:00
Jerome Petazzoni 807028cbf3 Remove WiFi warning 2019-06-13 10:51:44 -05:00
Jerome Petazzoni dfde597cb9 Merge branch 'master' into sfsf-2019-06 2019-06-13 10:51:22 -05:00
Jerome Petazzoni 96419c6baf test→node 2019-06-12 21:35:12 -05:00
Jerome Petazzoni 12da011f21 Customize logistics etc 2019-06-12 21:13:00 -05:00
Jerome Petazzoni fa1637fb7e Add Helm charts and reorg content 2019-06-12 21:07:55 -05:00
Jerome Petazzoni fbe2251e21 Merge remote-tracking branch 'origin/make-chart' into sfsf-2019-06 2019-06-12 21:07:12 -05:00
Jerome Petazzoni bac0d9febd Add a more meaningful exercise with Helm charts 2019-06-12 21:05:47 -05:00
Jerome Petazzoni b4faf10581 merge 2019-06-12 16:43:24 -05:00
Jerome Petazzoni 0ef9c87f97 Merge branch 'master' into sfsf-2019-06 2019-06-12 16:04:36 -05:00
Jerome Petazzoni addd14582a merge 2019-06-09 18:41:04 -05:00
Jerome Petazzoni 5299fbaab5 merge 2019-06-02 19:32:20 -05:00
Jerome Petazzoni 398ff5ee4f merge 2019-06-02 16:48:30 -05:00
Jerome Petazzoni b883e6d557 Prepare SFSF training 2019-06-02 16:47:53 -05:00
Jerome Petazzoni 42ed6fc56a Tweaks 2019-06-02 09:55:50 -05:00
Jerome Petazzoni f4ef2bd6d4 Add control plane auth info 2019-05-27 15:39:12 -05:00
25 changed files with 735 additions and 226 deletions

View File

@@ -31,7 +31,6 @@ infra_start() {
die "I could not find which AMI to use in this region. Try another region?"
fi
AWS_KEY_NAME=$(make_key_name)
AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}
sep "Starting instances"
info " Count: $COUNT"
@@ -39,11 +38,10 @@ infra_start() {
info " Token/tag: $TAG"
info " AMI: $AMI"
info " Key name: $AWS_KEY_NAME"
info " Instance type: $AWS_INSTANCE_TYPE"
result=$(aws ec2 run-instances \
--key-name $AWS_KEY_NAME \
--count $COUNT \
--instance-type $AWS_INSTANCE_TYPE \
--instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
--client-token $TAG \
--block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
--image-id $AMI)
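
Note on the `${AWS_INSTANCE_TYPE-t2.medium}` change above: the default instance type moves from a separate assignment into an inline shell default-value expansion. A minimal standalone sketch of how that expansion behaves (not part of the diff):

```bash
# ${VAR-default} expands to "default" only if VAR is unset;
# ${VAR:-default} would also kick in if VAR is set but empty.
unset AWS_INSTANCE_TYPE
echo "${AWS_INSTANCE_TYPE-t2.medium}"    # prints: t2.medium
AWS_INSTANCE_TYPE=t3a.medium
echo "${AWS_INSTANCE_TYPE-t2.medium}"    # prints: t3a.medium
```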
@@ -99,7 +97,7 @@ infra_disableaddrchecks() {
}
wait_until_tag_is_running() {
max_retry=100
max_retry=50
i=0
done_count=0
while [[ $done_count -lt $COUNT ]]; do

View File

@@ -30,9 +30,9 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
SETTINGS=admin-kuberouter
@@ -43,15 +43,11 @@ TAG=$PREFIX-$SETTINGS
--settings settings/$SETTINGS.yaml \
--count $((3*$STUDENTS))
./workshopctl disableaddrchecks $TAG
./workshopctl deploy $TAG
./workshopctl kubebins $TAG
./workshopctl disableaddrchecks $TAG
./workshopctl cards $TAG
#INFRA=infra/aws-us-west-1
export AWS_INSTANCE_TYPE=t3a.medium
SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./workshopctl start \
@@ -63,4 +59,3 @@ TAG=$PREFIX-$SETTINGS
./workshopctl deploy $TAG
./workshopctl kube $TAG 1.13.5
./workshopctl cards $TAG

View File

@@ -2,4 +2,4 @@
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200
/ /alfun.html 200!
/ /sfsf.yml.html 200!
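
The file above appears to be a Netlify-style `_redirects` file (an assumption based on its syntax): each line is `source destination status`, and a trailing `!` on the status forces the redirect even when a file exists at the source path, which matches the "adding forced redirect" commit message. A hedged sketch of the format:

```
# source    destination      status ("!" = forced/shadowing redirect)
/           /sfsf.yml.html   200!
/old-page   /new-page        301
```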

View File

@@ -1,62 +0,0 @@
title: |
  Containers,
  Docker,
  Kubernetes
  (Part 1)
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://alfun-2019-06.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
# DAY 1
- - containers/Docker_Overview.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- - containers/Start_And_Attach.md
- containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- - containers/Copying_Files_During_Build.md
- containers/Exercise_Dockerfile_Basic.md
- containers/Publishing_To_Docker_Hub.md
- containers/Multi_Stage_Builds.md
- containers/Dockerfile_Tips.md
- containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- containers/Resource_Limits.md
# DAY 2
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Exercise_Composefile.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Container_Engines.md
- containers/Windows_Containers.md
- - containers/Orchestration_Overview.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md

View File

@@ -1,73 +0,0 @@
title: |
  Containers,
  Docker,
  Kubernetes
  (Part 2)
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://alfun-2019-06.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- shared/toc.md
# DAY 3
- - shared/prereqs.md
- shared/connecting.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composedown.md
- k8s/kubectlget.md
- k8s/kubectlrun.md
- k8s/deploymentslideshow.md
- - k8s/kubectlexpose.md
- k8s/shippingimages.md
- k8s/buildshiprun-dockerhub.md
- k8s/ourapponkube.md
- k8s/scalingdockercoins.md
- shared/hastyconclusions.md
- k8s/daemonset.md
- - k8s/namespaces.md
- |
  # Exercise — from Compose to Kubernetes

  Let's run the wordsmith app on Kubernetes!

  The code is at: https://github.com/jpetazzo/wordsmith
- k8s/kustomize.md
- k8s/helm.md
#- k8s/create-chart.md
- k8s/rollout.md
- - k8s/healthchecks.md
#- k8s/healthchecks-more.md
- k8s/kubectlproxy.md
- k8s/localkubeconfig.md
- k8s/accessinternal.md
- k8s/dashboard.md
- k8s/setup-k8s.md
# DAY 4
- - k8s/volumes.md
- k8s/configuration.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/prometheus.md
- - k8s/authn-authz.md
- k8s/netpol.md
- k8s/podsecuritypolicy.md
- - k8s/ingress.md
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
#- k8s/extending-api.md
- - k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
- - k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -1,22 +0,0 @@
title: |
  Containers,
  Docker,
  Kubernetes
  (Extras)
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
gitrepo: github.com/jpetazzo/container.training
slides: http://alfun-2019-06.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- shared/toc.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md

View File

@@ -1,5 +0,0 @@
<p><a href="alfun-1.yml.html">Monday / Tuesday</a></p>
<p><a href="alfun-2.yml.html">Thursday / Friday</a></p>
<p><a href="alfun-3.yml.html">Extra slides (container internals)</a></p>

View File

@@ -1,11 +1,3 @@
- date: [2019-11-04, 2019-11-05]
country: de
city: Berlin
event: Velocity
speaker: jpetazzo
title: Deploying and scaling applications with Kubernetes
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/79109
- date: 2019-11-13
country: fr
city: Marseille

View File

@@ -167,11 +167,13 @@ What does that mean?
## Let's experiment a bit!
- For the exercises in this section, connect to the first node of the `test` cluster
- For this section, we will use a cluster with 4 nodes
(named node1, node2, node3, node4)
.exercise[
- SSH to the first node of the test cluster
- SSH to the first node of the cluster
- Check that the cluster is operational:
```bash

View File

@@ -18,7 +18,7 @@
(it gives us replication primitives)
- Kubernetes helps us clone / replicate environments
- Kubernetes helps us to clone/replicate environments
(all resources can be described with manifests)

View File

@@ -66,8 +66,6 @@ Look in each plugin's directory for its documentation.
---
class: extra-details
## Conf vs conflist
- There are two slightly different configuration formats

View File

@@ -0,0 +1,265 @@
# Securing the control plane
- Many components accept connections (and requests) from others:
- API server
- etcd
- kubelet
- We must secure these connections:
- to deny unauthorized requests
- to prevent eavesdropping secrets, tokens, and other sensitive information
- Disabling authentication and/or authorization is **strongly discouraged**
(but it's possible to do it, e.g. for learning / troubleshooting purposes)
---
## Authentication and authorization
- Authentication (checking "who you are") is done with mutual TLS
(both the client and the server need to hold a valid certificate)
- Authorization (checking "what you can do") is done in different ways
- the API server implements a sophisticated permission logic (with RBAC)
- some services will defer authorization to the API server (through webhooks)
- some services require a certificate signed by a particular CA / sub-CA
---
## In practice
- We will review the various communication channels in the control plane
- We will describe how they are secured
- When TLS certificates are used, we will indicate:
- which CA signs them
- what their subject (CN) should be, when applicable
- We will indicate how to configure security (client- and server-side)
---
## etcd peers
- Replication and coordination of etcd happens on a dedicated port
(typically port 2380; the default port for normal client connections is 2379)
- Authentication uses TLS certificates with a separate sub-CA
(otherwise, anyone with a Kubernetes client certificate could access etcd!)
- The etcd command line flags involved are:
`--peer-client-cert-auth=true` to activate it
`--peer-cert-file`, `--peer-key-file`, `--peer-trusted-ca-file`
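
For concreteness, a hypothetical etcd invocation wiring those peer flags together (the flag names are real etcd flags; the file paths are illustrative):

```bash
etcd --name node1 \
     --listen-peer-urls https://0.0.0.0:2380 \
     --peer-client-cert-auth=true \
     --peer-cert-file=/etc/etcd/pki/peer.crt \
     --peer-key-file=/etc/etcd/pki/peer.key \
     --peer-trusted-ca-file=/etc/etcd/pki/etcd-ca.crt
```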
---
## etcd clients
- The only¹ thing that connects to etcd is the API server
- Authentication uses TLS certificates with a separate sub-CA
(for the same reasons as for etcd inter-peer authentication)
- The etcd command line flags involved are:
`--client-cert-auth=true` to activate it
`--trusted-ca-file`, `--cert-file`, `--key-file`
- The API server command line flags involved are:
`--etcd-cafile`, `--etcd-certfile`, `--etcd-keyfile`
.footnote[¹Technically, there is also the etcd healthcheck. Let's ignore it for now.]
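
A sketch showing both sides of that connection (the flags are real etcd and kube-apiserver flags; the paths follow kubeadm conventions and are an assumption):

```bash
# etcd side: only accept clients presenting a cert signed by the etcd CA
etcd --client-cert-auth=true \
     --trusted-ca-file=/etc/etcd/pki/etcd-ca.crt \
     --cert-file=/etc/etcd/pki/server.crt \
     --key-file=/etc/etcd/pki/server.key

# API server side: present a client cert issued by that same CA
kube-apiserver --etcd-servers=https://127.0.0.1:2379 \
               --etcd-cafile=/etc/etcd/pki/etcd-ca.crt \
               --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt \
               --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
```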
---
## API server clients
- The API server has a sophisticated authentication and authorization system
- For connections coming from other components of the control plane:
- authentication uses certificates (trusting the certificates' subject or CN)
- authorization uses whatever mechanism is enabled (most oftentimes, RBAC)
- The relevant API server flags are:
`--client-ca-file`, `--tls-cert-file`, `--tls-private-key-file`
- Each component connecting to the API server takes a `--kubeconfig` flag
(to specify a kubeconfig file containing the CA cert, client key, and client cert)
- Yes, that kubeconfig file follows the same format as our `~/.kube/config` file!
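
A minimal sketch of such a kubeconfig file (paths and addresses are illustrative):

```yaml
apiVersion: v1
kind: Config
clusters:
- name: kubernetes
  cluster:
    certificate-authority: /etc/kubernetes/pki/ca.crt
    server: https://10.0.0.1:6443
users:
- name: kube-scheduler
  user:
    client-certificate: /etc/kubernetes/pki/scheduler.crt
    client-key: /etc/kubernetes/pki/scheduler.key
contexts:
- name: default
  context:
    cluster: kubernetes
    user: kube-scheduler
current-context: default
```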
---
## Kubelet and API server
- Communication between kubelet and API server can be established both ways
- Kubelet → API server:
- kubelet registers itself ("hi, I'm node42, do you have work for me?")
- connection is kept open and re-established if it breaks
- that's how the kubelet knows which pods to start/stop
- API server → kubelet:
- used to retrieve logs, exec, attach to containers
---
## Kubelet → API server
- Kubelet is started with `--kubeconfig` with API server information
- The client certificate of the kubelet will typically have:
`CN=system:node:<nodename>` and groups `O=system:nodes`
- Nothing special on the API server side
(it will authenticate like any other client)
---
## API server → kubelet
- Kubelet is started with the flag `--client-ca-file`
(typically using the same CA as the API server)
- API server will use a dedicated key pair when contacting kubelet
(specified with `--kubelet-client-certificate` and `--kubelet-client-key`)
- Authorization uses webhooks
(enabled with `--authorization-mode=Webhook` on kubelet)
- The webhook server is the API server itself
(the kubelet sends back a request to the API server to ask, "can this person do that?")
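
Putting those pieces together, a hypothetical kubelet invocation (these are real kubelet flags; the paths are illustrative):

```bash
# Verify client certs (e.g. the API server's) against the cluster CA,
# and delegate authorization decisions back to the API server
kubelet --client-ca-file=/etc/kubernetes/pki/ca.crt \
        --authorization-mode=Webhook \
        --authentication-token-webhook \
        --kubeconfig=/etc/kubernetes/kubelet.conf
```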
---
## Scheduler
- The scheduler connects to the API server like an ordinary client
- The certificate of the scheduler will have `CN=system:kube-scheduler`
---
## Controller manager
- The controller manager is also a normal client to the API server
- Its certificate will have `CN=system:kube-controller-manager`
- If we use the CSR API, the controller manager needs the CA cert and key
(passed with flags `--cluster-signing-cert-file` and `--cluster-signing-key-file`)
- We usually want the controller manager to generate tokens for service accounts
- These tokens deserve some details (on the next slide!)
---
## Service account tokens
- Each time we create a service account, the controller manager generates a token
- These tokens are JWT tokens, signed with a particular key
- These tokens are used for authentication with the API server
(and therefore, the API server needs to be able to verify their integrity)
- This uses another keypair:
- the private key (used for signature) is passed to the controller manager
<br/>(using flags `--service-account-private-key-file` and `--root-ca-file`)
- the public key (used for verification) is passed to the API server
<br/>(using flag `--service-account-key-file`)
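
A sketch of retrieving and inspecting one of these tokens (this assumes a cluster of this vintage, where the controller manager materializes each token in a Secret; `default` is the namespace's default service account):

```bash
# Find the token Secret associated with the "default" service account
SECRET=$(kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}')
# Extract the JWT (stored base64-encoded in the Secret)
kubectl get secret "$SECRET" -o jsonpath='{.data.token}' | base64 -d
# The output is header.payload.signature (each part base64url-encoded);
# decoding the payload reveals claims like:
#   {"iss":"kubernetes/serviceaccount",
#    "kubernetes.io/serviceaccount/namespace":"default", ...}
```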
---
## kube-proxy
- kube-proxy is "yet another API server client"
- In many clusters, it runs as a Daemon Set
- In that case, it will have its own Service Account and associated permissions
- It will authenticate using the token of that Service Account
---
## Webhooks
- We mentioned webhooks earlier; how does that really work?
- The Kubernetes API has special resource types to check permissions
- One of them is SubjectAccessReview
- To check if a particular user can do a particular action on a particular resource:
- we prepare a SubjectAccessReview object
- we send that object to the API server
- the API server responds with allow/deny (and optional explanations)
- Using webhooks for authorization = sending SAR to authorize each request
---
## Subject Access Review
Here is an example showing how to check if `jean.doe` can `get` some `pods` in `kube-system`:
```bash
kubectl -v9 create -f- <<EOF
apiVersion: authorization.k8s.io/v1beta1
kind: SubjectAccessReview
spec:
  user: jean.doe
  group:
  - foo
  - bar
  resourceAttributes:
    #group: blah.k8s.io
    namespace: kube-system
    resource: pods
    verb: get
    #name: web-xyz1234567-pqr89
EOF
```
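For comparison, `kubectl auth can-i` answers the same kind of question from the CLI; combined with impersonation flags, it checks on behalf of another user (this requires impersonation privileges):

```bash
kubectl auth can-i get pods --namespace=kube-system \
        --as=jean.doe --as-group=foo --as-group=bar
```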

View File

@@ -0,0 +1,367 @@
# Creating Helm charts
- We are going to create a generic Helm chart
- We will use that Helm chart to deploy DockerCoins
- Each component of DockerCoins will have its own *release*
- In other words, we will "install" that Helm chart multiple times
(one time per component of DockerCoins)
---
## Creating a generic chart
- Rather than starting from scratch, we will use `helm create`
- This will give us a basic chart that we will customize
.exercise[
- Create a basic chart:
```bash
cd ~
helm create helmcoins
```
]
This creates a basic chart in the directory `helmcoins`.
---
## What's in the basic chart?
- The basic chart will create a Deployment and a Service
- Optionally, it will also include an Ingress
- If we don't pass any values, it will deploy the `nginx` image
- We can override many things in that chart
- Let's try to deploy DockerCoins components with that chart!
---
## Writing `values.yaml` for our components
- We need to write one `values.yaml` file for each component
(hasher, redis, rng, webui, worker)
- We will start with the `values.yaml` of the chart, and remove what we don't need
- We will create 5 files:
hasher.yaml, redis.yaml, rng.yaml, webui.yaml, worker.yaml
---
## Getting started
- For component X, we want to use the image dockercoins/X:v0.1
(for instance, for rng, we want to use the image dockercoins/rng:v0.1)
- Exception: for redis, we want to use the official image redis:latest
.exercise[
- Write minimal YAML files for the 5 components, specifying only the image
]
--
*Hint: our YAML files should look like this.*
```yaml
### rng.yaml
image:
  repository: dockercoins/`rng`
  tag: v0.1
```
---
## Deploying DockerCoins components
- For convenience, let's work in a separate namespace
.exercise[
- Create a new namespace:
```bash
kubectl create namespace helmcoins
```
- Switch to that namespace:
```bash
kns helmcoins
```
]
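(`kns` is a helper installed on these training VMs; if it isn't available, the equivalent stock command should be, assuming kubectl ≥ 1.12:)

```bash
kubectl config set-context --current --namespace=helmcoins
```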
---
## Deploying the chart
- To install a chart, we can use the following command:
```bash
helm install [--name `X`] <chart>
```
- We can also use the following command, which is idempotent:
```bash
helm upgrade --install `X` chart
```
.exercise[
- Install the 5 components of DockerCoins:
```bash
for COMPONENT in hasher redis rng webui worker; do
  helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
done
```
]
---
## Checking what we've done
- Let's see if DockerCoins is working!
.exercise[
- Check the logs of the worker:
```bash
stern worker
```
- Look at the resources that were created:
```bash
kubectl get all
```
]
There are *many* issues to fix!
---
## Service names
- Our services should be named `rng`, `hasher`, etc., but they are named differently
- Look at the YAML template used for the services
- Does it look like we can override the name of the services?
--
- *Yes*, we can use `.Values.nameOverride`
- This means setting `nameOverride` in the values YAML file
---
## Setting service names
- Let's add `nameOverride: X` in each values YAML file!
(where X is hasher, redis, rng, etc.)
.exercise[
- Edit the 5 YAML files to add `nameOverride: X`
- Deploy the updated Chart:
```bash
for COMPONENT in hasher redis rng webui worker; do
  helm upgrade --install $COMPONENT helmcoins/ --values=$COMPONENT.yaml
done
```
(Yes, this is exactly the same command as before!)
]
---
## Checking what we've done
.exercise[
- Check the service names:
```bash
kubectl get services
```
Great! (We have a useless service for `worker`, but let's ignore it for now.)
- Check the state of the pods:
```bash
kubectl get pods
```
Not so great... Some pods are *not ready.*
]
---
## Troubleshooting pods
- The easiest way to troubleshoot pods is to look at *events*
- We can look at all the events on the cluster (with `kubectl get events`)
- Or we can use `kubectl describe` on the objects that have problems
(`kubectl describe` will retrieve the events related to the object)
.exercise[
- Check the events for the redis pods:
```bash
kubectl describe pod -l app.kubernetes.io/name=redis
```
]
What's going on?
---
## Healthchecks
- The default chart defines healthchecks doing HTTP requests on port 80
- That won't work for redis and worker
(redis is not HTTP, and not on port 80; worker doesn't even listen)
--
- We could comment out the healthchecks
- We could also make them conditional
- This sounds more interesting, let's do that!
---
## Conditionals
- We need to enclose the healthcheck block with:
`{{ if CONDITION }}` at the beginning
`{{ end }}` at the end
- For the condition, we will use `.Values.healthcheck`
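
As a sketch, the wrapped section of `templates/deployment.yaml` would end up looking roughly like this (the probe details follow the `helm create` scaffold and may differ between Helm versions):

```yaml
{{ if .Values.healthcheck }}
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http
{{ end }}
```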
---
## Updating the deployment template
.exercise[
- Edit `helmcoins/templates/deployment.yaml`
- Before the healthchecks section (it starts with `livenessProbe:`), add:
`{{ if .Values.healthcheck }}`
- After the healthchecks section (just before `resources:`), add:
`{{ end }}`
- Edit `hasher.yaml`, `rng.yaml`, `webui.yaml` to add:
`healthcheck: true`
]
---
## Update the deployed charts
- We can now apply the new templates (and the new values)
.exercise[
- Use the same command as earlier to upgrade all five components
- Use `kubectl describe` to confirm that `redis` starts correctly
- Use `kubectl describe` to confirm that `hasher` still has healthchecks
]
---
## Is it working now?
- If we look at the worker logs, it appears that the worker is still stuck
- What could be happening?
--
- The redis service is not on port 80!
- We need to update the port number in redis.yaml
- We also need to update the port number in deployment.yaml
(it is hard-coded to 80 there)
---
## Setting the redis port
.exercise[
- Edit `redis.yaml` to add:
```yaml
service:
  port: 6379
```
- Edit `helmcoins/templates/deployment.yaml`
- The line with `containerPort` should be:
```yaml
containerPort: {{ .Values.service.port }}
```
]
---
## Apply changes
- Re-run the for loop to execute `helm upgrade` one more time
- Check the worker logs
- This time, it should be working!
---
## Extra steps
- We don't need to create a service for the worker
- We can put the whole service block in a conditional
(this will require additional changes in other files referencing the service)
- We can set the webui to be a NodePort service
- We can change the number of workers with `replicaCount`
- And much more!
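
For instance, `templates/service.yaml` could be wrapped in a conditional keyed on a hypothetical `service.enabled` value (the value name and helper names are illustrative, following the `helm create` scaffold):

```yaml
{{ if .Values.service.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "helmcoins.fullname" . }}
spec:
  type: {{ .Values.service.type }}
  ports:
  - port: {{ .Values.service.port }}
  selector:
    app.kubernetes.io/name: {{ include "helmcoins.name" . }}
{{ end }}
```

`worker.yaml` would then set `service: { enabled: false }`, while the other components keep it enabled.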

View File

@@ -38,7 +38,7 @@
<!-- ##VERSION## -->
- Unfortunately, as of Kubernetes 1.15, the CLI cannot create daemon sets
- Unfortunately, as of Kubernetes 1.14, the CLI cannot create daemon sets
--

View File

@@ -175,7 +175,7 @@ Success!
]
We should get `No resources found.` and the `kubernetes` service, respectively.
So far, so good.
Note: the API server automatically created the `kubernetes` service entry.
@@ -225,7 +225,7 @@ Success?
]
Our Deployment is in bad shape:
Our Deployment is in a bad shape:
```
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/web 0/1 0 0 2m26s

View File

@@ -304,9 +304,9 @@ It will use the default success threshold (1 successful attempt = alive).
- We need to make sure that the healthcheck doesn't trip when
performance degrades due to external pressure
- Using a readiness check would have fewer effects
- Using a readiness check would have lesser effects
(but it would still be an imperfect solution)
(but it still would be an imperfect solution)
- A possible combination:
@@ -344,7 +344,7 @@ class: extra-details
- When a process is killed, its children are *orphaned* and attached to PID 1
- PID 1 has the responsibility of *reaping* these processes when they terminate
- PID 1 has the responsibility if *reaping* these processes when they terminate
- OK, but how does that affect us?
@@ -378,11 +378,11 @@ class: extra-details
(because worker isn't a backend for a service)
- Liveness may help us restart a broken worker, but how can we check it?
- Liveness may help us to restart a broken worker, but how can we check it?
- Embedding an HTTP server is an option
(but it has a high potential for unwanted side effects and false positives)
(but it has a high potential for unwanted side-effects and false positives)
- Using a "lease" file can be relatively easy:

View File

@@ -34,11 +34,11 @@
- Download the `kubectl` binary from one of these links:
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.15.0/bin/linux/amd64/kubectl)
[Linux](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/linux/amd64/kubectl)
|
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.15.0/bin/darwin/amd64/kubectl)
[macOS](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/darwin/amd64/kubectl)
|
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.15.0/bin/windows/amd64/kubectl.exe)
[Windows](https://storage.googleapis.com/kubernetes-release/release/v1.14.2/bin/windows/amd64/kubectl.exe)
- On Linux and macOS, make the binary executable with `chmod +x kubectl`

View File

@@ -195,7 +195,7 @@ class: extra-details
## Check our pods
- The pods will be scheduled on the nodes
- The pods will be scheduled to the nodes
- The nodes will pull the `nginx` image, and start the pods
@@ -325,7 +325,7 @@ class: extra-details
- We will add the `--network-plugin` and `--pod-cidr` flags
- We all have a "cluster number" (let's call that `C`) printed on your VM info card
- We all have a "cluster number" (let's call that `C`)
- We will use pod CIDR `10.C.N.0/24` (where `N` is the node number: 1, 2, 3)
@@ -480,23 +480,6 @@ Sometimes it works, sometimes it doesn't. Why?
```bash
kubectl get nodes -o wide
```
---
## Firewalling
- By default, Docker prevents containers from using arbitrary IP addresses
(by setting up iptables rules)
- We need to allow our containers to use our pod CIDR
- For simplicity, we will insert a blanket iptables rule allowing all traffic:
`iptables -I FORWARD -j ACCEPT`
- This has to be done on every node
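
One way to apply that rule everywhere (a sketch, assuming the passwordless SSH setup used between these training VMs):

```bash
for NODE in node1 node2 node3; do
  ssh $NODE sudo iptables -I FORWARD -j ACCEPT
done
```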
---
## Setting up routing
@@ -505,8 +488,6 @@ Sometimes it works, sometimes it doesn't. Why?
- Create all the routes on all the nodes
- Insert the iptables rule allowing traffic
- Check that you can ping all the pods from one of the nodes
- Check that you can `curl` the ClusterIP of the Service successfully

View File

@@ -86,7 +86,7 @@ class: extra-details
## What can we store via the Kubernetes API?
- The API server stores most Kubernetes resources in etcd
- The API server stores most Kubernetes resources into etcd
- Etcd is designed for reliability, not for performance

View File

@@ -1,4 +1,4 @@
# Highly available Persistent Volumes
# Highly available Persistent Volumes (extra material)
- How can we achieve true durability?

View File

@@ -30,6 +30,8 @@
- Go to @@SLIDES@@ to view these slides
- Join the chat room: @@CHAT@@
<!-- ```open @@SLIDES@@``` -->
]

View File

@@ -1,6 +1,6 @@
## Versions installed
- Kubernetes 1.15.0
- Kubernetes 1.14.2
- Docker Engine 18.09.6
- Docker Compose 1.21.1
@@ -23,7 +23,7 @@ class: extra-details
## Kubernetes and Docker compatibility
- Kubernetes 1.15 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md#dependencies)
- Kubernetes 1.14 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#external-dependencies)
<br/>
(the latest version when Kubernetes 1.14 was released)

View File

@@ -1,11 +1,19 @@
## Intros
- Hello! I'm Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
- Hello! We are:
- The training will run from 9am to 6pm
- .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Tiny Shell Script LLC)
- There will be a lunch break (and coffee breaks!)
- .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Ardan Labs LLC)
- The training will run from 9am to 5pm
- There will be a lunch break
(And coffee breaks!)
- Feel free to interrupt for questions at any time
- *Especially when you see full screen container pictures!*
- Live feedback, questions, help: @@CHAT@@

slides/sfsf.yml Normal file
View File

@@ -0,0 +1,61 @@
title: |
  Kubernetes
  Advanced
  Training
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
chat: "[Gitter](https://gitter.im/jpetazzo/training-20190613-sanfrancisco)"
#chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: http://sfsf-2019-06.container.training/
exclude:
- self-paced
chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
# DAY 1
- - k8s/prereqs-admin.md
- k8s/architecture.md
- k8s/deploymentslideshow.md
- k8s/dmuc.md
- - k8s/multinode.md
- k8s/cni.md
- k8s/apilb.md
- - k8s/kubercoins.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/healthchecks.md
- k8s/healthchecks-more.md
- - k8s/volumes.md
- k8s/configuration.md
- k8s/statefulsets.md
- k8s/local-persistent-volumes.md
- k8s/portworx.md
# DAY 2
- - k8s/namespaces.md
- k8s/kustomize.md
- k8s/helm.md
- k8s/create-chart.md
- k8s/create-more-charts.md
- - k8s/extending-api.md
- k8s/operators.md
- k8s/operators-design.md
- k8s/owners-and-dependents.md
- - k8s/authn-authz.md
- k8s/control-plane-auth.md
- k8s/prometheus.md
- - k8s/resource-limits.md
- k8s/metrics-server.md
- k8s/cluster-sizing.md
- k8s/horizontal-pod-autoscaler.md
# CONCLUSION
- - k8s/lastwords-admin.md
- k8s/links.md
- shared/thankyou.md

View File

@@ -50,6 +50,8 @@ Misattributed to Benjamin Franklin
- Go to @@SLIDES@@ to view these slides
- Join the chat room: @@CHAT@@
<!-- ```open @@SLIDES@@``` -->
]