Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-15 18:19:56 +00:00)

Comparing 42 commits: gitpod...kadm-2019-
Commits:

4dcdebcd0d, 5464f4e639, 2be63105d5, 2366d9f34d, 33e13958eb, a4a9dfc59f,
8b5f105448, ad1c32bd7f, 3d908fe540, 52736c54d7, f1253b0b1a, c1de412179,
abfd23008c, d0669cf668, 20de74db09, b44e8ef118, 1fc773f76d, 60cbaf9820,
01d45b2535, d511fc6489, ff810274c0, e6a5a0b837, fd54bdd4a6, 7e97dd68e0,
aa670f5351, 42ed6fc56a, 2c0b4b15ba, 59c2ff1911, 879e7f2ec9, 68f35bd2ed,
f4ef2bd6d4, baf428ebdb, 3a87183a66, 3f70ee2c2a, 4c55336079, 39027675d5,
c565dad43c, b0f01e018c, 69f9cee6c9, b69119eed4, 4c5da9ed0d, 6b9b83a7ae
@@ -1,4 +1,4 @@
 # Uncomment and/or edit one of the following lines if necessary.
 #/ /kube-halfday.yml.html 200
 #/ /kube-fullday.yml.html 200
-#/ /kube-twodays.yml.html 200
+/ /kadm-twodays.yml.html 200!
@@ -1,63 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
  #- containers/Docker_History.md
  - containers/Training_Environment.md
  - containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  - containers/Start_And_Attach.md
- - containers/Initial_Images.md
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
- - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
  - containers/Multi_Stage_Builds.md
  - containers/Publishing_To_Docker_Hub.md
  - containers/Dockerfile_Tips.md
  - containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
  - containers/Labels.md
  - containers/Getting_Inside.md
  - containers/Resource_Limits.md
- - containers/Container_Networking_Basics.md
  - containers/Network_Drivers.md
  - containers/Container_Network_Model.md
  #- containers/Connecting_Containers_With_Links.md
  - containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
  - containers/Windows_Containers.md
  - containers/Working_With_Volumes.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
- - containers/Docker_Machine.md
  - containers/Advanced_Dockerfiles.md
  - containers/Application_Configuration.md
  - containers/Logging.md
- - containers/Namespaces_Cgroups.md
  - containers/Copy_On_Write.md
  #- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
  #- containers/Ecosystem.md
  - containers/Orchestration_Overview.md
  - shared/thankyou.md
  - containers/links.md
@@ -1,63 +0,0 @@
title: |
  Introduction
  to Containers

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- in-person

chapters:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
  - containers/Docker_History.md
  - containers/Training_Environment.md
  - containers/Installing_Docker.md
  - containers/First_Containers.md
  - containers/Background_Containers.md
  - containers/Start_And_Attach.md
- - containers/Initial_Images.md
  - containers/Building_Images_Interactively.md
  - containers/Building_Images_With_Dockerfiles.md
  - containers/Cmd_And_Entrypoint.md
  - containers/Copying_Files_During_Build.md
  - containers/Exercise_Dockerfile_Basic.md
- - containers/Multi_Stage_Builds.md
  - containers/Publishing_To_Docker_Hub.md
  - containers/Dockerfile_Tips.md
  - containers/Exercise_Dockerfile_Advanced.md
- - containers/Naming_And_Inspecting.md
  - containers/Labels.md
  - containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
  - containers/Network_Drivers.md
  - containers/Container_Network_Model.md
  #- containers/Connecting_Containers_With_Links.md
  - containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
  - containers/Windows_Containers.md
  - containers/Working_With_Volumes.md
  - containers/Compose_For_Dev_Stacks.md
  - containers/Exercise_Composefile.md
  - containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
  - containers/Application_Configuration.md
  - containers/Logging.md
  - containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
  - containers/Copy_On_Write.md
  #- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
  - containers/Ecosystem.md
  - containers/Orchestration_Overview.md
  - shared/thankyou.md
  - containers/links.md
@@ -1,4 +1,4 @@
-# TLS bootstrap
+# TLS bootstrap (extra material)

 - kubelet needs TLS keys and certificates to communicate with the control plane
slides/k8s/control-plane-auth.md (new file, 265 lines)

@@ -0,0 +1,265 @@
# Securing the control plane

- Many components accept connections (and requests) from others:

  - API server

  - etcd

  - kubelet

- We must secure these connections:

  - to deny unauthorized requests

  - to prevent eavesdropping on secrets, tokens, and other sensitive information

- Disabling authentication and/or authorization is **strongly discouraged**

  (but it's possible to do it, e.g. for learning / troubleshooting purposes)

---

## Authentication and authorization

- Authentication (checking "who you are") is done with mutual TLS

  (both the client and the server need to hold a valid certificate)

- Authorization (checking "what you can do") is done in different ways

  - the API server implements a sophisticated permission logic (with RBAC)

  - some services will defer authorization to the API server (through webhooks)

  - some services require a certificate signed by a particular CA / sub-CA

---

## In practice

- We will review the various communication channels in the control plane

- We will describe how they are secured

- When TLS certificates are used, we will indicate:

  - which CA signs them

  - what their subject (CN) should be, when applicable

- We will indicate how to configure security (client- and server-side)

---

## etcd peers

- Replication and coordination of etcd happens on a dedicated port

  (typically port 2380; the default port for normal client connections is 2379)

- Authentication uses TLS certificates with a separate sub-CA

  (otherwise, anyone with a Kubernetes client certificate could access etcd!)

- The etcd command line flags involved are:

  `--peer-client-cert-auth=true` to activate it

  `--peer-cert-file`, `--peer-key-file`, `--peer-trusted-ca-file`
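For illustration, here is a minimal sketch of an etcd invocation with peer TLS enabled (the certificate paths are made up for the example; distributions put them in different places):

```bash
etcd --name node1 \
     --listen-peer-urls https://0.0.0.0:2380 \
     --peer-client-cert-auth=true \
     --peer-trusted-ca-file=/etc/etcd/pki/peer-ca.crt \
     --peer-cert-file=/etc/etcd/pki/peer.crt \
     --peer-key-file=/etc/etcd/pki/peer.key
```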
---

## etcd clients

- The only¹ thing that connects to etcd is the API server

- Authentication uses TLS certificates with a separate sub-CA

  (for the same reasons as for etcd inter-peer authentication)

- The etcd command line flags involved are:

  `--client-cert-auth=true` to activate it

  `--trusted-ca-file`, `--cert-file`, `--key-file`

- The API server command line flags involved are:

  `--etcd-cafile`, `--etcd-certfile`, `--etcd-keyfile`

.footnote[¹Technically, there is also the etcd healthcheck. Let's ignore it for now.]
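As a sketch, the two sides could be configured like this (only the TLS-related flags are shown, and the paths are hypothetical):

```bash
# On etcd: require and verify client certificates
etcd --client-cert-auth=true \
     --trusted-ca-file=/etc/etcd/pki/etcd-ca.crt \
     --cert-file=/etc/etcd/pki/server.crt \
     --key-file=/etc/etcd/pki/server.key

# On the API server: present a client certificate signed by that sub-CA
kube-apiserver --etcd-cafile=/etc/etcd/pki/etcd-ca.crt \
               --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt \
               --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
```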
---

## API server clients

- The API server has a sophisticated authentication and authorization system

- For connections coming from other components of the control plane:

  - authentication uses certificates (trusting the certificates' subject or CN)

  - authorization uses whatever mechanism is enabled (most often, RBAC)

- The relevant API server flags are:

  `--client-ca-file`, `--tls-cert-file`, `--tls-private-key-file`

- Each component connecting to the API server takes a `--kubeconfig` flag

  (to specify a kubeconfig file containing the CA cert, client key, and client cert)

- Yes, that kubeconfig file follows the same format as our `~/.kube/config` file!
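For illustration, here is one way to assemble such a kubeconfig file with `kubectl config` (the file names and the server address are placeholders):

```bash
# Reference the cluster CA and the API server endpoint
kubectl --kubeconfig=scheduler.conf config set-cluster default \
        --server=https://10.0.0.1:6443 \
        --certificate-authority=ca.crt --embed-certs
# Reference the component's client certificate and key
kubectl --kubeconfig=scheduler.conf config set-credentials kube-scheduler \
        --client-certificate=scheduler.crt --client-key=scheduler.key --embed-certs
# Tie them together in a context, and use that context by default
kubectl --kubeconfig=scheduler.conf config set-context default \
        --cluster=default --user=kube-scheduler
kubectl --kubeconfig=scheduler.conf config use-context default
```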
---

## Kubelet and API server

- Communication between kubelet and API server can be established both ways

- Kubelet → API server:

  - kubelet registers itself ("hi, I'm node42, do you have work for me?")

  - connection is kept open and re-established if it breaks

  - that's how the kubelet knows which pods to start/stop

- API server → kubelet:

  - used to retrieve logs, exec, attach to containers

---

## Kubelet → API server

- Kubelet is started with `--kubeconfig` with API server information

- The client certificate of the kubelet will typically have:

  `CN=system:node:<nodename>` and groups `O=system:nodes`

- Nothing special on the API server side

  (it will authenticate like any other client)
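On a kubeadm cluster, we can verify this by decoding the kubelet's client certificate with `openssl` (the path below is the one used by kubeadm; adjust it for other setups):

```bash
sudo openssl x509 -noout -subject \
     -in /var/lib/kubelet/pki/kubelet-client-current.pem
# Expected output, for a node named node1:
# subject=O = system:nodes, CN = system:node:node1
```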
---

## API server → kubelet

- Kubelet is started with the flag `--client-ca-file`

  (typically using the same CA as the API server)

- API server will use a dedicated key pair when contacting kubelet

  (specified with `--kubelet-client-certificate` and `--kubelet-client-key`)

- Authorization uses webhooks

  (enabled with `--authorization-mode=Webhook` on kubelet)

- The webhook server is the API server itself

  (the kubelet sends back a request to the API server to ask, "can this person do that?")

---

## Scheduler

- The scheduler connects to the API server like an ordinary client

- The certificate of the scheduler will have `CN=system:kube-scheduler`

---

## Controller manager

- The controller manager is also a normal client to the API server

- Its certificate will have `CN=system:kube-controller-manager`

- If we use the CSR API, the controller manager needs the CA cert and key

  (passed with flags `--cluster-signing-cert-file` and `--cluster-signing-key-file`)

- We usually want the controller manager to generate tokens for service accounts

- These tokens deserve some details (on the next slide!)

---

## Service account tokens

- Each time we create a service account, the controller manager generates a token

- These tokens are JWT tokens, signed with a particular key

- These tokens are used for authentication with the API server

  (and therefore, the API server needs to be able to verify their integrity)

- This uses another keypair:

  - the private key (used for signature) is passed to the controller manager
    <br/>(using flags `--service-account-private-key-file` and `--root-ca-file`)

  - the public key (used for verification) is passed to the API server
    <br/>(using flag `--service-account-key-file`)
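For reference, such a keypair can be generated with `openssl` (a sketch; tools like kubeadm do this for us and wire up the flags above):

```bash
# Generate the private key (for the controller manager)
openssl genrsa -out sa.key 2048
# Extract the public key (for the API server)
openssl rsa -in sa.key -pubout -out sa.pub
```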
---

## kube-proxy

- kube-proxy is "yet another API server client"

- In many clusters, it runs as a Daemon Set

- In that case, it will have its own Service Account and associated permissions

- It will authenticate using the token of that Service Account

---

## Webhooks

- We mentioned webhooks earlier; how does that really work?

- The Kubernetes API has special resource types to check permissions

- One of them is SubjectAccessReview

- To check if a particular user can do a particular action on a particular resource:

  - we prepare a SubjectAccessReview object

  - we send that object to the API server

  - the API server responds with allow/deny (and optional explanations)

- Using webhooks for authorization = sending SAR to authorize each request

---

## Subject Access Review

Here is an example showing how to check if `jean.doe` can `get` some `pods` in `kube-system`:

```bash
kubectl -v9 create -f- <<EOF
apiVersion: authorization.k8s.io/v1beta1
kind: SubjectAccessReview
spec:
  user: jean.doe
  group:
  - foo
  - bar
  resourceAttributes:
    #group: blah.k8s.io
    namespace: kube-system
    resource: pods
    verb: get
    #name: web-xyz1234567-pqr89
EOF
```
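For quick one-off checks, `kubectl auth can-i` asks a similar question without writing any YAML (here combined with impersonation, which itself requires the `impersonate` permission; the user and groups are examples):

```bash
kubectl auth can-i get pods --namespace=kube-system \
        --as=jean.doe --as-group=foo --as-group=bar
```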
@@ -62,7 +62,7 @@ This is what I do if I want to obtain a certificate.

 2. Create a Certificate Signing Request (CSR).

-   (The CSR contains the identity that I claim and an expiration date.)
+   (The CSR contains the identity that I claim and a public key.)

 3. Send that CSR to the Certificate Authority (CA).

@@ -244,7 +244,7 @@ For a user named `jean.doe`, we will have:

 The command above generates:

-- a 2048-bit RSA key, without DES encryption, stored in key.pem
+- a 2048-bit RSA key, without encryption, stored in key.pem
 - a CSR for the name `jean.doe` in group `devs`

 ---

@@ -345,7 +345,7 @@ The command above generates:

   kctx -
   ```

-- Retrieve the certificate from the CSR:
+- Retrieve the updated CSR object and extract the certificate:
   ```bash
   kubectl get csr users:jean.doe \
           -o jsonpath={.status.certificate} \
slides/k8s/healthchecks-more.md (new file, 393 lines)

@@ -0,0 +1,393 @@
## Questions to ask before adding healthchecks

- Do we want liveness, readiness, both?

  (sometimes, we can use the same check, but with different failure thresholds)

- Do we have existing HTTP endpoints that we can use?

- Do we need to add new endpoints, or perhaps use something else?

- Are our healthchecks likely to use resources and/or slow down the app?

- Do they depend on additional services?

  (this can be particularly tricky, see next slide)

---

## Healthchecks and dependencies

- A good healthcheck should always indicate the health of the service itself

- It should not be affected by the state of the service's dependencies

- Example: a web server requiring a database connection to operate

  (make sure that the healthcheck can report "OK" even if the database is down;
  <br/>
  because it won't help us to restart the web server if the issue is with the DB!)

- Example: a microservice calling other microservices

- Example: a worker process

  (these will generally require minor code changes to report health)

---

# Adding healthchecks to an app (bonus material)

- Let's add healthchecks to the DockerCoins demo app!

- We will examine the questions of the previous slide

- Then we will review each component individually to add healthchecks

---

## Liveness, readiness, or both?

- To answer that question, we need to see the app run for a while

- Do we get temporary, recoverable glitches?

  → then use readiness

- Or do we get hard lock-ups requiring a restart?

  → then use liveness

- In the case of DockerCoins, we don't know yet!

- Let's pick liveness

---

## Do we have HTTP endpoints that we can use?

- Each of the 3 web services (hasher, rng, webui) has a trivial route on `/`

- These routes:

  - don't seem to perform anything complex or expensive

  - don't seem to call other services

- Perfect!

  (See next slides for individual details)

---

- [hasher.rb](https://github.com/jpetazzo/container.training/blob/master/dockercoins/hasher/hasher.rb)
  ```ruby
  get '/' do
    "HASHER running on #{Socket.gethostname}\n"
  end
  ```

- [rng.py](https://github.com/jpetazzo/container.training/blob/master/dockercoins/rng/rng.py)
  ```python
  @app.route("/")
  def index():
      return "RNG running on {}\n".format(hostname)
  ```

- [webui.js](https://github.com/jpetazzo/container.training/blob/master/dockercoins/webui/webui.js)
  ```javascript
  app.get('/', function (req, res) {
    res.redirect('/index.html');
  });
  ```

---

## Running DockerCoins

- We will run DockerCoins in a new, separate namespace

- We will use a set of YAML manifests and pre-built images

- We will add our new liveness probe to the YAML of the `rng` DaemonSet

- Then, we will deploy the application

---

## Creating a new namespace

- This will make sure that we don't collide / conflict with previous exercises

.exercise[

- Create the yellow namespace:
  ```bash
  kubectl create namespace yellow
  ```

- Switch to that namespace:
  ```bash
  kns yellow
  ```

]

---

## Retrieving DockerCoins manifests

- All the manifests that we need are on a convenient repository:

  https://github.com/jpetazzo/kubercoins

.exercise[

- Clone that repository:
  ```bash
  cd ~
  git clone https://github.com/jpetazzo/kubercoins
  ```

- Change directory to the repository:
  ```bash
  cd kubercoins
  ```

]
---

## A simple HTTP liveness probe

This is what our liveness probe should look like:

```yaml
containers:
- name: ...
  image: ...
  livenessProbe:
    httpGet:
      path: /
      port: 80
    initialDelaySeconds: 30
    periodSeconds: 5
```

This gives the service 30 seconds to start. (Way more than necessary!)
<br/>
It will run the probe every 5 seconds.
<br/>
It will use the default timeout (1 second).
<br/>
It will use the default failure threshold (3 failed attempts = dead).
<br/>
It will use the default success threshold (1 successful attempt = alive).

---

## Adding the liveness probe

- Let's add the liveness probe, then deploy DockerCoins

.exercise[

- Edit `rng-daemonset.yaml` and add the liveness probe:
  ```bash
  vim rng-daemonset.yaml
  ```

- Load the YAML for all the resources of DockerCoins:
  ```bash
  kubectl apply -f .
  ```

]

---

## Testing the liveness probe

- The rng service needs 100ms to process a request

  (because it is single-threaded and sleeps 0.1s in each request)

- The probe timeout is set to 1 second

- If we send more than 10 requests per second per backend, it will break

- Let's generate traffic and see what happens!

.exercise[

- Get the ClusterIP address of the rng service:
  ```bash
  kubectl get svc rng
  ```

]

---

## Monitoring the rng service

- Each command below will show us what's happening on a different level

.exercise[

- In one window, monitor cluster events:
  ```bash
  kubectl get events -w
  ```

- In another window, monitor the response time of rng:
  ```bash
  httping `<ClusterIP>`
  ```

- In another window, monitor pods status:
  ```bash
  kubectl get pods -w
  ```

]

---

## Generating traffic

- Let's use `ab` to send concurrent requests to rng

.exercise[

- In yet another window, generate traffic:
  ```bash
  ab -c 10 -n 1000 http://`<ClusterIP>`/1
  ```

- Experiment with higher values of `-c` and see what happens

]

- The `-c` parameter indicates the number of concurrent requests

- The final `/1` is important to generate actual traffic

  (otherwise we would use the ping endpoint, which doesn't sleep 0.1s per request)

---

## Discussion

- Above a given threshold, the liveness probe starts failing

  (about 10 concurrent requests per backend should be more than enough)

- When the liveness probe fails 3 times in a row, the container is restarted

- During the restart, there is *less* capacity available

- ... Meaning that the other backends are likely to timeout as well

- ... Eventually causing all backends to be restarted

- ... And each fresh backend gets restarted, too

- This goes on until the load goes down, or we add capacity

*This wouldn't be a good healthcheck in a real application!*

---

## Better healthchecks

- We need to make sure that the healthcheck doesn't trip when
  performance degrades due to external pressure

- Using a readiness check instead would have milder effects

  (but it still would be an imperfect solution)

- A possible combination:

  - readiness check with a short timeout / low failure threshold

  - liveness check with a longer timeout / higher failure threshold
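A sketch of what that combination could look like for rng (the thresholds are illustrative rather than tuned values, and we assume the container is named `rng`):

```bash
kubectl patch daemonset rng --patch "$(cat <<'EOF'
spec:
  template:
    spec:
      containers:
      - name: rng
        readinessProbe:                 # trips quickly; only removes from rotation
          httpGet: {path: /, port: 80}
          timeoutSeconds: 1
          failureThreshold: 3
        livenessProbe:                  # trips slowly; restarts the container
          httpGet: {path: /, port: 80}
          timeoutSeconds: 5
          failureThreshold: 10
EOF
)"
```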
---

## Healthchecks for redis

- A liveness probe is enough

  (it's not useful to remove a backend from rotation when it's the only one)

- We could use an exec probe running `redis-cli ping`
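A minimal sketch of that probe (assuming the redis Deployment and its container are both named `redis`):

```bash
kubectl patch deployment redis --patch "$(cat <<'EOF'
spec:
  template:
    spec:
      containers:
      - name: redis
        livenessProbe:
          exec:
            command: ["redis-cli", "ping"]
          timeoutSeconds: 1
EOF
)"
```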
---

class: extra-details

## Exec probes and zombies

- When using exec probes, we should make sure that we have a *zombie reaper*

  🤔🧐🧟 Wait, what?

- When a process terminates, its parent must call `wait()`/`waitpid()`

  (this is how the parent process retrieves the child's exit status)

- In the meantime, the process is in *zombie* state

  (the process state will show as `Z` in `ps`, `top` ...)

- When a process is killed, its children are *orphaned* and attached to PID 1

- PID 1 has the responsibility of *reaping* these processes when they terminate

- OK, but how does that affect us?

---

class: extra-details

## PID 1 in containers

- On ordinary systems, PID 1 (`/sbin/init`) has logic to reap processes

- In containers, PID 1 is typically our application process

  (e.g. Apache, the JVM, NGINX, Redis ...)

- These *do not* take care of reaping orphans

- If we use exec probes, we need to add a process reaper

- We can add [tini](https://github.com/krallin/tini) to our images

- Or [share the PID namespace between containers of a pod](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/)

  (and have gcr.io/pause take care of the reaping)

---

## Healthchecks for worker

- Readiness isn't useful

  (because worker isn't a backend for a service)

- Liveness may help us to restart a broken worker, but how can we check it?

- Embedding an HTTP server is an option

  (but it has a high potential for unwanted side-effects and false positives)

- Using a "lease" file can be relatively easy (see the sketch below):

  - touch a file during each iteration of the main loop

  - check the timestamp of that file from an exec probe

- Writing logs (and checking them from the probe) also works
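A sketch of the lease-file approach (the file path and the one-minute threshold are arbitrary choices):

```bash
# Worker side: refresh the lease at each iteration of the main loop
while true; do
  sleep 1            # stand-in for one unit of actual work
  touch /tmp/lease
done
```

```bash
# Probe side (the command of an exec liveness probe):
# succeed only if the lease was refreshed less than one minute ago
test -n "$(find /tmp/lease -mmin -1 2>/dev/null)"
```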
slides/k8s/openid-connect.md (new file, 377 lines)

@@ -0,0 +1,377 @@
# OpenID Connect

- The Kubernetes API server can perform authentication with OpenID Connect

- This requires an *OpenID provider*

  (external authorization server using the OAuth 2.0 protocol)

- We can use a third-party provider (e.g. Google) or run our own (e.g. Dex)

- We are going to give an overview of the protocol

- We will show it in action (in a simplified scenario)

---

## Workflow overview

- We want to access our resources (a Kubernetes cluster)

- We authenticate with the OpenID provider

  - we can do this directly (e.g. by going to https://accounts.google.com)

  - or maybe a kubectl plugin can open a browser page on our behalf

- After authenticating us, the OpenID provider gives us:

  - an *id token* (a short-lived signed JSON Web Token, see next slide)

  - a *refresh token* (to renew the previous one when needed)

- We can now issue requests to the Kubernetes API with the *id token*

- The API server will verify that token's content to authenticate us

---

## JSON Web Tokens

- A JSON Web Token (JWT) has three parts:

  - a header specifying algorithms and token type

  - a payload (indicating who issued the token, for whom, which purposes...)

  - a signature generated by the issuer (the issuer = the OpenID provider)

- Anyone can verify a JWT without contacting the issuer

  (except to obtain the issuer's public key)

- Pro tip: we can inspect a JWT with https://jwt.io/
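If we prefer the command line to jwt.io, we can also decode a token locally (a rough sketch; it assumes `jq` is installed, and `base64 -d` may complain about missing padding on some systems):

```bash
# Print the payload (the second dot-separated field) of a token stored in $JWT
echo "$JWT" | cut -d. -f2 | base64 -d 2>/dev/null | jq .
```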
---

## How the Kubernetes API uses JWT

- Server side

  - enable OIDC authentication

  - indicate which issuer (provider) should be allowed

  - indicate which audience (or "client id") should be allowed

  - if necessary, map or prefix user and group names

- Client side

  - obtain JWT as described earlier

  - pass JWT as authentication token

  - renew JWT when needed (using the refresh token)

---

## Demo time!

- We will use [Google Accounts](https://accounts.google.com) as our OpenID provider

- We will use the [Google OAuth Playground](https://developers.google.com/oauthplayground) as the "audience" or "client id"

- We will obtain a JWT through Google Accounts and the OAuth Playground

- We will enable OIDC in the Kubernetes API server

- We will use the JWT to authenticate

.footnote[If you can't or won't use a Google Account, you can try to adapt to another provider.]

---

## Checking the API server logs

- The API server logs will be particularly useful in this section

  (they will indicate e.g. why a specific token is rejected)

- Let's keep an eye on the API server output!

.exercise[

- Tail the logs of the API server:
  ```bash
  kubectl logs kube-apiserver-node1 --follow --namespace=kube-system
  ```

]

---

## Authenticate with the OpenID provider

- We will use the Google OAuth Playground for convenience

- In a real scenario, we would need our own OAuth client instead of the playground

  (even if we were still using Google as the OpenID provider)

.exercise[

- Open the Google OAuth Playground:
  ```
  https://developers.google.com/oauthplayground/
  ```

- Enter our own custom scope in the text field:
  ```
  https://www.googleapis.com/auth/userinfo.email
  ```

- Click on "Authorize APIs" and allow the playground to access our email address

]

---

## Obtain our JSON Web Token

- The previous step gave us an "authorization code"

- We will use it to obtain tokens

.exercise[

- Click on "Exchange authorization code for tokens"

]

- The JWT is the very long `id_token` that shows up on the right hand side

  (it is a base64-encoded JSON object, and should therefore start with `eyJ`)

---

## Using our JSON Web Token

- We need to create a context (in kubeconfig) for our token

  (if we just add the token or use `kubectl --token`, our certificate will still be used)

.exercise[

- Create a new authentication section in kubeconfig:
  ```bash
  kubectl config set-credentials myjwt --token=eyJ...
  ```

- Try to use it:
  ```bash
  kubectl --user=myjwt get nodes
  ```

]

We should get an `Unauthorized` response, since we haven't enabled OpenID Connect in the API server yet. We should also see `invalid bearer token` in the API server log output.

---

## Enabling OpenID Connect

- We need to add a few flags to the API server configuration

- These two are mandatory:

  `--oidc-issuer-url` → URL of the OpenID provider

  `--oidc-client-id` → app requesting the authentication
  <br/>(in our case, that's the ID for the Google OAuth Playground)

- This one is optional:

  `--oidc-username-claim` → which field should be used as user name
  <br/>(we will use the user's email address instead of an opaque ID)

- See the [API server documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server) for more details about all available flags

---

## Updating the API server configuration

- The instructions below will work for clusters deployed with kubeadm

  (or where the control plane is deployed in static pods)

- If your cluster is different, you will need to adapt them

.exercise[

- Edit `/etc/kubernetes/manifests/kube-apiserver.yaml`

- Add the following lines to the list of command-line flags:
  ```yaml
  - --oidc-issuer-url=https://accounts.google.com
  - --oidc-client-id=407408718192.apps.googleusercontent.com
  - --oidc-username-claim=email
  ```

]

---

## Restarting the API server

- The kubelet monitors the files in `/etc/kubernetes/manifests`

- When we save the pod manifest, kubelet will restart the corresponding pod

  (using the updated command line flags)

.exercise[

- After making the changes described on the previous slide, save the file

- Issue a simple command (like `kubectl version`) until the API server is back up

  (it might take between a few seconds and one minute for the API server to restart)

- Restart the `kubectl logs` command to view the logs of the API server

]

---

## Using our JSON Web Token

- Now that the API server is set up to recognize our token, try again!

.exercise[

- Try an API command with our token:
  ```bash
  kubectl --user=myjwt get nodes
  kubectl --user=myjwt get pods
  ```

]

We should see a message like:
```
Error from server (Forbidden): nodes is forbidden: User "jean.doe@gmail.com"
cannot list resource "nodes" in API group "" at the cluster scope
```

→ We were successfully *authenticated*, but not *authorized*.

---

## Authorizing our user

- As an extra step, let's grant read access to our user

- We will use the pre-defined ClusterRole `view`

.exercise[

- Create a ClusterRoleBinding allowing us read access to the cluster:
  ```bash
  kubectl create clusterrolebinding i-can-view \
          --user=`jean.doe@gmail.com` --clusterrole=view
  ```

- Confirm that we can now list pods with our token:
  ```bash
  kubectl --user=myjwt get pods
  ```

]

---

## From demo to production

.warning[This was a very simplified demo! In a real deployment...]

- We wouldn't use the Google OAuth Playground

- We *probably* wouldn't even use Google at all

  (it doesn't seem to provide a way to include groups!)

- Some popular alternatives:

  - [Dex](https://github.com/dexidp/dex), [Keycloak](https://www.keycloak.org/) (self-hosted)

  - [Okta](https://developer.okta.com/docs/how-to/creating-token-with-groups-claim/#step-five-decode-the-jwt-to-verify) (SaaS)

- We would use a helper (like the [kubelogin](https://github.com/int128/kubelogin) plugin) to automatically obtain tokens

---

class: extra-details

## Service Account tokens

- The tokens used by Service Accounts are JWT tokens as well

- They are signed and verified using a special service account key pair

.exercise[

- Extract the token of a service account in the current namespace:
  ```bash
  kubectl get secrets -o jsonpath={..token} | base64 -d
  ```

- Copy-paste the token to a verification service like https://jwt.io

- Notice that it says "Invalid Signature"

]

---

class: extra-details

## Verifying Service Account tokens

- JSON Web Tokens embed the URL of the "issuer" (=OpenID provider)

- The issuer provides its public key through a well-known discovery endpoint

  (similar to https://accounts.google.com/.well-known/openid-configuration)

- There is no such endpoint for the Service Account key pair

- But we can provide the public key ourselves for verification

---

class: extra-details

## Verifying a Service Account token

- On clusters provisioned with kubeadm, the Service Account key pair is:

  `/etc/kubernetes/pki/sa.key` (used by the controller manager to generate tokens)

  `/etc/kubernetes/pki/sa.pub` (used by the API server to validate the same tokens)

.exercise[

- Display the public key used to sign Service Account tokens:
  ```bash
  sudo cat /etc/kubernetes/pki/sa.pub
  ```

- Copy-paste the key in the "verify signature" area on https://jwt.io

- It should now say "Signature Verified"

]
slides/k8s/operators-design.md (new file, 356 lines)

@@ -0,0 +1,356 @@
# Designing an operator (extra material)

- Writing a quick-and-dirty operator, or a POC/MVP, is easy

- Writing a robust operator is hard

- We will describe the general idea

- We will identify some of the associated challenges

- We will list a few tools that can help us

---

## Top-down vs. bottom-up

- Both approaches are possible

- Let's see what they entail, and their respective pros and cons

---

## Top-down approach

- Start with high-level design (see next slide)

- Pros:

  - can yield cleaner design that will be more robust

- Cons:

  - must be able to anticipate all the events that might happen

  - design will be better only to the extent of what we anticipated

  - hard to anticipate if we don't have production experience

---

## High-level design

- What are we solving?

  (e.g.: geographic databases backed by PostGIS with Redis caches)

- What are our use-cases, stories?

  (e.g.: adding/resizing caches and read replicas; load balancing queries)

- What kind of outage do we want to address?

  (e.g.: loss of individual node, pod, volume)

- What are our *non-features*, the things we don't want to address?

  (e.g.: loss of datacenter/zone; differentiating between read and write queries;
  <br/>
  cache invalidation; upgrading to newer major versions of Redis, PostGIS, PostgreSQL)

---

## Low-level design

- What Custom Resource Definitions do we need?

  (one, many?)

- How will we store configuration information?

  (part of the CRD spec fields, annotations, other?)

- Do we need to store state? If so, where?

  - state that is small and doesn't change much can be stored via the Kubernetes API
    <br/>
    (e.g.: leader information, configuration, credentials)

  - things that are big and/or change a lot should go elsewhere
    <br/>
    (e.g.: metrics, bigger configuration files like GeoIP)

---

class: extra-details

## What can we store via the Kubernetes API?

- The API server stores most Kubernetes resources into etcd

- Etcd is designed for reliability, not for performance

- If our storage needs exceed what etcd can offer, we need to use something else:

  - either directly

  - or by extending the API server
    <br/>(for instance by using the aggregation layer, like [metrics server](https://github.com/kubernetes-incubator/metrics-server) does)

---

## Bottom-up approach

- Start with existing Kubernetes resources (Deployment, Stateful Set...)

- Run the system in production

- Add scripts and automation to facilitate day-to-day operations

- Turn the scripts into an operator

- Pros: simpler to get started; reflects actual use-cases

- Cons: can result in convoluted designs requiring extensive refactoring

---

## General idea

- Our operator will watch its CRDs *and associated resources*

- Drawing state diagrams and finite state automata helps a lot

- It's OK if some transitions lead to a big catch-all "human intervention"

- Over time, we will learn about new failure modes and add to these diagrams

- It's OK to start with CRD creation / deletion and prevent any modification

  (that's the easy POC/MVP we were talking about)

- *Presentation* and *validation* will help our users

  (more on that later)

---

## Challenges

- Reacting to infrastructure disruption can seem hard at first

- Kubernetes gives us a lot of primitives to help:

  - Pods and Persistent Volumes will *eventually* recover

  - Stateful Sets give us easy ways to "add N copies" of a thing

- The real challenges come with configuration changes

  (i.e., what to do when our users update our CRDs)

- Keep in mind that [some] of the [largest] cloud [outages] haven't been caused by [natural catastrophes], or even code bugs, but by configuration changes

[some]: https://www.datacenterdynamics.com/news/gcp-outage-mainone-leaked-google-cloudflare-ip-addresses-china-telecom/
[largest]: https://aws.amazon.com/message/41926/
[outages]: https://aws.amazon.com/message/65648/
[natural catastrophes]: https://www.datacenterknowledge.com/amazon/aws-says-it-s-never-seen-whole-data-center-go-down

---

## Configuration changes

- It is helpful to analyze and understand how Kubernetes controllers work:

  - watch resources for modifications

  - compare desired state (CRD) and current state

  - issue actions to converge state

- Configuration changes will probably require *another* state diagram or FSA

- Again, it's OK to have transitions labeled as "unsupported"

  (i.e. reject some modifications because we can't execute them)

---

## Tools

- CoreOS / RedHat Operator Framework

  [GitHub](https://github.com/operator-framework)
  |
  [Blog](https://developers.redhat.com/blog/2018/12/18/introduction-to-the-kubernetes-operator-framework/)
  |
  [Intro talk](https://www.youtube.com/watch?v=8k_ayO1VRXE)
  |
  [Deep dive talk](https://www.youtube.com/watch?v=fu7ecA2rXmc)

- Zalando Kubernetes Operator Pythonic Framework (KOPF)

  [GitHub](https://github.com/zalando-incubator/kopf)
  |
  [Docs](https://kopf.readthedocs.io/)
  |
  [Step-by-step tutorial](https://kopf.readthedocs.io/en/stable/walkthrough/problem/)

- Mesosphere Kubernetes Universal Declarative Operator (KUDO)

  [GitHub](https://github.com/kudobuilder/kudo)
  |
  [Blog](https://mesosphere.com/blog/announcing-maestro-a-declarative-no-code-approach-to-kubernetes-day-2-operators/)
  |
  [Docs](https://kudo.dev/)
  |
  [Zookeeper example](https://github.com/kudobuilder/frameworks/tree/master/repo/stable/zookeeper)

---

## Validation

- By default, a CRD is "free form"

  (we can put pretty much anything we want in it)

- When creating a CRD, we can provide an OpenAPI v3 schema
  ([Example](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml#L34))

- The API server will then validate resources created/edited with this schema

- If we need stronger validation, we can use a Validating Admission Webhook:

  - run an [admission webhook server](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#write-an-admission-webhook-server) to receive validation requests

  - register the webhook by creating a [ValidatingWebhookConfiguration](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#configure-admission-webhooks-on-the-fly)

  - each time the API server receives a request matching the configuration,
    <br/>the request is sent to our server for validation
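Here is a minimal sketch of a CRD carrying such a schema (the group, kind, and fields are made up for the example; the `apiextensions.k8s.io/v1beta1` API shown was the current one when these slides were written):

```bash
kubectl apply -f- <<'EOF'
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: geodatabases.example.training
spec:
  group: example.training
  version: v1alpha1
  scope: Namespaced
  names:
    kind: GeoDatabase
    plural: geodatabases
    singular: geodatabase
  validation:
    openAPIV3Schema:
      properties:
        spec:
          required: ["replicas"]
          properties:
            replicas:
              type: integer
              minimum: 1
EOF
```

With this schema in place, creating a GeoDatabase without `spec.replicas` (or with, say, `replicas: 0`) is rejected by the API server.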
---

## Presentation

- By default, `kubectl get mycustomresource` won't display much information

  (just the name and age of each resource)

- When creating a CRD, we can specify additional columns to print
  ([Example](https://github.com/amaizfinance/redis-operator/blob/master/deploy/crds/k8s_v1alpha1_redis_crd.yaml#L6),
  [Docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#additional-printer-columns))

- By default, `kubectl describe mycustomresource` will also be generic

- `kubectl describe` can show events related to our custom resources

  (for that, we need to create Event resources, and fill the `involvedObject` field)

- For scalable resources, we can define a `scale` sub-resource

- This will enable the use of `kubectl scale` and other scaling-related operations

---

## About scaling

- It is possible to use the HPA (Horizontal Pod Autoscaler) with CRDs

- But it is not always desirable

- The HPA works very well for homogeneous, stateless workloads

- For other workloads, your mileage may vary

- Some systems can scale across multiple dimensions

  (for instance: increase the number of replicas, or the number of shards?)

- If autoscaling is desired, the operator will have to take complex decisions

  (example: Zalando's Elasticsearch Operator ([Video](https://www.youtube.com/watch?v=lprE0J0kAq0)))

---

## Versioning

- As our operator evolves over time, we may have to change the CRD

  (add, remove, change fields)

- Like every other resource in Kubernetes, [custom resources are versioned](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/)

- When creating a CRD, we need to specify a *list* of versions

- Versions can be marked as `stored` and/or `served`

---

## Stored version

- Exactly one version has to be marked as the `stored` version

- As the name implies, it is the one that will be stored in etcd

- Resources in storage are never converted automatically

  (we need to read and re-write them ourselves)

- Yes, this means that we can have different versions in etcd at any time

- Our code needs to handle all the versions that still exist in storage

---

## Served versions

- By default, the Kubernetes API will serve resources "as-is"

  (using their stored version)

- It will assume that all versions are compatible storage-wise

  (i.e. that the spec and fields are compatible between versions)

- We can provide [conversion webhooks](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/#webhook-conversion) to "translate" requests

  (the alternative is to upgrade all stored resources and stop serving old versions)

---

## Operator reliability

- Remember that the operator itself must be resilient

  (e.g.: the node running it can fail)

- Our operator must be able to restart and recover gracefully

- Do not store state locally

  (unless we can reconstruct that state when we restart)

- As indicated earlier, we can use the Kubernetes API to store data:

  - in the custom resources themselves

  - in other resources' annotations

---

## Beyond CRDs

- CRDs cannot use custom storage (e.g. for time series data)

- CRDs cannot support arbitrary subresources (like logs or exec for Pods)

- CRDs cannot support protobuf (for faster, more efficient communication)

- If we need these things, we can use the [aggregation layer](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) instead

- The aggregation layer proxies all requests below a specific path to another server

  (this is used e.g. by the metrics server)

- [This documentation page](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#choosing-a-method-for-adding-custom-resources) compares the features of CRDs and API aggregation
slides/k8s/operators.md (new file, 389 lines)

@@ -0,0 +1,389 @@
# Operators

- Operators are one of the many ways to extend Kubernetes

- We will define operators

- We will see how they work

- We will install a specific operator (for ElasticSearch)

- We will use it to provision an ElasticSearch cluster

---

## What are operators?

*An operator represents **human operational knowledge in software,**
<br/>
to reliably manage an application.
— [CoreOS](https://coreos.com/blog/introducing-operators.html)*

Examples:

- Deploying and configuring replication with MySQL, PostgreSQL ...

- Setting up Elasticsearch, Kafka, RabbitMQ, Zookeeper ...

- Reacting to failures when intervention is needed

- Scaling up and down these systems

---

## What are they made from?

- Operators combine two things:

  - Custom Resource Definitions

  - controller code watching the corresponding resources and acting upon them

- A given operator can define one or multiple CRDs

- The controller code (control loop) typically runs within the cluster

  (running as a Deployment with 1 replica is a common scenario)

- But it could also run elsewhere

  (nothing mandates that the code run on the cluster, as long as it has API access)

---

## Why use operators?

- Kubernetes gives us Deployments, StatefulSets, Services ...

- These mechanisms give us building blocks to deploy applications

- They work great for services that are made of *N* identical containers

  (like stateless ones)

- They also work great for some stateful applications like Consul, etcd ...

  (with the help of highly persistent volumes)

- They're not enough for complex services:

  - where different containers have different roles

  - where extra steps have to be taken when scaling or replacing containers

---

## Use-cases for operators

- Systems with primary/secondary replication

  Examples: MariaDB, MySQL, PostgreSQL, Redis ...

- Systems where different groups of nodes have different roles

  Examples: ElasticSearch, MongoDB ...

- Systems with complex dependencies (that are themselves managed with operators)

  Examples: Flink or Kafka, which both depend on Zookeeper

---

## More use-cases

- Representing and managing external resources

  (Example: [AWS Service Operator](https://operatorhub.io/operator/alpha/aws-service-operator.v0.0.1))

- Managing complex cluster add-ons

  (Example: [Istio operator](https://operatorhub.io/operator/beta/istio-operator.0.1.6))

- Deploying and managing our applications' lifecycles

  (more on that later)

---

## How operators work

- An operator creates one or more CRDs

  (i.e., it creates new "Kinds" of resources on our cluster)

- The operator also runs a *controller* that will watch its resources

- Each time we create/update/delete a resource, the controller is notified

  (we could write our own cheap controller with `kubectl get --watch`, as sketched below)
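A deliberately naive sketch of that idea in shell (real controllers use watch APIs through a client library, and `elasticsearchclusters` is just the custom resource we will meet below):

```bash
# Print one line per add/update/delete event, and "reconcile" on each one
kubectl get elasticsearchclusters --all-namespaces --watch -o name |
while read resource; do
  echo "$resource changed; comparing desired and current state..."
  # here, a real controller would create/update the underlying
  # resources (StatefulSets, Services, ...) to converge the state
done
```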
---
|
||||
|
||||
## One operator in action
|
||||
|
||||
- We will install the UPMC Enterprises ElasticSearch operator
|
||||
|
||||
- This operator requires PersistentVolumes
|
||||
|
||||
- We will install Rancher's [local path storage provisioner](https://github.com/rancher/local-path-provisioner) to automatically create these
|
||||
|
||||
- Then, we will create an ElasticSearch resource
|
||||
|
||||
- The operator will detect that resource and provision the cluster
|
||||
|
||||
---
|
||||
|
||||
## Installing a Persistent Volume provisioner
|
||||
|
||||
(This step can be skipped if you already have a dynamic volume provisioner.)
|
||||
|
||||
- This provisioner creates Persistent Volumes backed by `hostPath`
|
||||
|
||||
(local directories on our nodes)
|
||||
|
||||
- It doesn't require anything special ...
|
||||
|
||||
- ... But losing a node = losing the volumes on that node!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Install the local path storage provisioner:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/local-path-storage.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Making sure we have a default StorageClass
|
||||
|
||||
- The ElasticSearch operator will create StatefulSets
|
||||
|
||||
- These StatefulSets will instantiate PersistentVolumeClaims
|
||||
|
||||
- These PVCs need to be explicitly associated with a StorageClass
|
||||
|
||||
- Or we need to tag a StorageClass to be used as the default one
|
||||
|
||||
.exercise[
|
||||
|
||||
- List StorageClasses:
|
||||
```bash
|
||||
kubectl get storageclasses
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see the `local-path` StorageClass.
|
||||
|
||||
---
|
||||
|
||||
## Setting a default StorageClass
|
||||
|
||||
- This is done by adding an annotation to the StorageClass:
|
||||
|
||||
`storageclass.kubernetes.io/is-default-class: true`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Tag the StorageClass so that it's the default one:
|
||||
```bash
|
||||
kubectl annotate storageclass local-path \
|
||||
storageclass.kubernetes.io/is-default-class=true
|
||||
```
|
||||
|
||||
- Check the result:
|
||||
```bash
|
||||
kubectl get storageclasses
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Now, the StorageClass should have `(default)` next to its name.
|
||||
|
||||
---

## Install the ElasticSearch operator

- The operator needs:

  - a Deployment for its controller
  - a ServiceAccount, ClusterRole, ClusterRoleBinding for permissions
  - a Namespace

- We have grouped all the definitions for these resources in a YAML file

.exercise[

- Install the operator:
  ```bash
  kubectl apply -f ~/container.training/k8s/elasticsearch-operator.yaml
  ```

]
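Before going further, we can check that the operator's controller is running. (We don't assume a specific namespace here, since it is defined in the YAML file that we just applied.)

```bash
# Look for the operator's controller pod across all namespaces.
kubectl get pods --all-namespaces | grep -i elasticsearch-operator
```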
---

## Wait for the operator to be ready

- Some operators require us to create their CRDs separately

- This operator will create its CRD itself

  (i.e. the CRD is not listed in the YAML that we applied earlier)

.exercise[

- Wait until the `elasticsearchclusters` CRD shows up:
  ```bash
  kubectl get crds
  ```

]
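If we'd rather script the wait instead of re-running `kubectl get crds` by hand, a small polling loop does the trick. (The fully qualified CRD name below is an assumption based on the operator's API group; `kubectl get crds` shows the real one.)

```bash
# Poll until the CRD exists (the exact name may differ; check `kubectl get crds`).
until kubectl get crd elasticsearchclusters.enterprises.upmc.com >/dev/null 2>&1; do
  echo "Waiting for the elasticsearchclusters CRD ..."
  sleep 2
done
```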
---

## Create an ElasticSearch resource

- We can now create a resource with `kind: ElasticsearchCluster`

- The YAML for that resource will specify all the desired parameters:

  - how many nodes we want of each type (client, master, data)
  - which image to use
  - add-ons (kibana, cerebro, ...)
  - whether to use TLS or not
  - etc.

.exercise[

- Create our ElasticSearch cluster:
  ```bash
  kubectl apply -f ~/container.training/k8s/elasticsearch-cluster.yaml
  ```

]
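Since the CRD registered a new resource type, all the usual verbs work on it. For instance, we can list and inspect our ElasticSearch clusters just like built-in resources:

```bash
# List the custom resources, then show their details.
kubectl get elasticsearchclusters
kubectl describe elasticsearchclusters
```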
---

## Operator in action

- Over the next few minutes, the operator will create:

  - StatefulSets (one for master nodes, one for data nodes)

  - Deployments (for client nodes; and for add-ons like cerebro and kibana)

  - Services (for all these pods)

.exercise[

- Wait for all the StatefulSets to be fully up and running:
  ```bash
  kubectl get statefulsets -w
  ```

]
---

## Connecting to our cluster

- Since connecting directly to the ElasticSearch API is a bit raw,
  <br/>we'll connect to the cerebro frontend instead

.exercise[

- Edit the cerebro service to change its type from ClusterIP to NodePort:
  ```bash
  kubectl patch svc cerebro-es -p "spec: { type: NodePort }"
  ```

- Retrieve the NodePort that was allocated:
  ```bash
  kubectl get svc cerebro-es
  ```

- Connect to that port with a browser

]
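If we prefer scripting that lookup to eyeballing the output, `jsonpath` can extract the port. (This assumes the service exposes a single port.)

```bash
# Grab the NodePort assigned to the cerebro service.
NODEPORT=$(kubectl get svc cerebro-es \
           --output 'jsonpath={.spec.ports[0].nodePort}')
echo "cerebro should be reachable on port $NODEPORT of any node"
```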
---

## (Bonus) Setup filebeat

- Let's send some data to our brand new ElasticSearch cluster!

- We'll deploy a filebeat DaemonSet to collect node logs

.exercise[

- Deploy filebeat:
  ```bash
  kubectl apply -f ~/container.training/k8s/filebeat.yaml
  ```

]

We should see at least one index being created in cerebro.
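To confirm that filebeat is indeed running on every node, we can compare the DaemonSet's DESIRED and READY counts. (Depending on the manifest, the DaemonSet may live in `kube-system` rather than the current namespace.)

```bash
# One filebeat pod should be scheduled per node.
kubectl get daemonsets --all-namespaces
```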
---

## (Bonus) Access log data with kibana

- Let's expose kibana (by making kibana-es a NodePort too; see the commands below)

- Then access kibana

- We'll need to configure kibana indexes
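The commands mirror what we did for cerebro, assuming the service is indeed named `kibana-es`:

```bash
# Switch the kibana service to NodePort, then look up the allocated port.
kubectl patch svc kibana-es -p "spec: { type: NodePort }"
kubectl get svc kibana-es
```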
---

## Deploying our apps with operators

- It is very simple to deploy with `kubectl run` / `kubectl expose`

- We can unlock more features by writing YAML and using `kubectl apply`

- Kustomize or Helm let us deploy in multiple environments

  (and adjust/tweak parameters in each environment)

- We can also use an operator to deploy our application
---

## Pros and cons of deploying with operators

- The app definition and configuration are persisted in the Kubernetes API

- Multiple instances of the app can be manipulated with `kubectl get`

- We can add labels and annotations to the app instances (see the example below)

- Our controller can execute custom code for any lifecycle event

- However, we need to write this controller

- We need to be careful about changes

  (what happens when the resource `spec` is updated?)
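As an example of that `kubectl`-level manipulation, here is how we could label all our app instances, then select them. (The `env=staging` label is made up for illustration.)

```bash
# Label every ElasticsearchCluster resource, then filter on that label.
kubectl label elasticsearchclusters --all env=staging
kubectl get elasticsearchclusters --selector=env=staging
```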
---

## Operators are not magic

- Look at the ElasticSearch resource definition

  (`~/container.training/k8s/elasticsearch-cluster.yaml`)

- What should happen if we flip the `use-tls` flag? Twice?

- What should happen if we remove / re-add the kibana or cerebro sections?

- What should happen if we change the number of nodes?

- What if we want different images or parameters for the different nodes?

*Operators can be very powerful, iff we know exactly the scenarios that they can handle.*
@@ -26,8 +26,7 @@ chapters:
- - k8s/multinode.md
  - k8s/cni.md
  - k8s/apilb.md
  #FIXME: check Laurent Corbes' talk to see if there is anything else worth mentioning
  #BONUS: CoreDNS integration to resolve the names of neighboring clusters
  - k8s/control-plane-auth.md
- - k8s/setup-managed.md
  - k8s/setup-selfhosted.md
  - k8s/cluster-upgrade.md
@@ -4,12 +4,12 @@ title: |
  and operators

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-20190610-sanjose)"
#chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/
slides: http://kadm-2019-06.container.training/

exclude:
- self-paced
@@ -60,7 +60,7 @@ chapters:
  - k8s/links.md
- shared/thankyou.md
- |
  # (All content after this slide is bonus material)
  # (All content after this is bonus material)
  # EXTRA
- - k8s/volumes.md
  - k8s/configuration.md
@@ -1,77 +0,0 @@
title: |
  Deploying and Scaling Microservices
  with Kubernetes

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
  #- shared/composescale.md
  #- shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
  - k8s/kubenet.md
- - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/deploymentslideshow.md
  - k8s/kubectlexpose.md
- - k8s/shippingimages.md
  #- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
  #- k8s/kubectlproxy.md
  #- k8s/localkubeconfig.md
  #- k8s/accessinternal.md
  - k8s/dashboard.md
  #- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
  - k8s/daemonset.md
- - k8s/rollout.md
  - k8s/namespaces.md
  #- k8s/kustomize.md
  #- k8s/helm.md
  #- k8s/create-chart.md
  #- k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
  #- k8s/netpol.md
  #- k8s/authn-authz.md
  #- k8s/csr-api.md
  #- k8s/podsecuritypolicy.md
  #- k8s/ingress.md
  #- k8s/gitworkflows.md
  - k8s/prometheus.md
  #- k8s/volumes.md
  #- k8s/build-with-docker.md
  #- k8s/build-with-kaniko.md
  #- k8s/configuration.md
  #- k8s/owners-and-dependents.md
  #- k8s/extending-api.md
  #- k8s/statefulsets.md
  #- k8s/local-persistent-volumes.md
  #- k8s/portworx.md
  #- k8s/staticpods.md
- - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md
@@ -1,66 +0,0 @@
title: |
  Kubernetes 101

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
  # Bridget doesn't go into as much depth with compose
  #- shared/composescale.md
  #- shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
  - k8s/kubenet.md
  - k8s/kubectlget.md
  - k8s/setup-k8s.md
- - k8s/kubectlrun.md
  - k8s/deploymentslideshow.md
  - k8s/kubectlexpose.md
  - k8s/shippingimages.md
  #- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
  #- k8s/kubectlproxy.md
  #- k8s/localkubeconfig.md
  #- k8s/accessinternal.md
- - k8s/dashboard.md
  #- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
  - k8s/daemonset.md
  - k8s/rollout.md
- - k8s/logs-cli.md
  # Bridget hasn't added EFK yet
  #- k8s/logs-centralized.md
  - k8s/namespaces.md
  - k8s/helm.md
  - k8s/create-chart.md
  #- k8s/kustomize.md
  #- k8s/netpol.md
  - k8s/whatsnext.md
  # - k8s/links.md
  # Bridget-specific
  - k8s/links-bridget.md
  - shared/thankyou.md
@@ -1,77 +0,0 @@
title: |
  Deploying and Scaling Microservices
  with Docker and Kubernetes

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- in-person

chapters:
- shared/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
- - k8s/kubenet.md
  - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/deploymentslideshow.md
- - k8s/kubectlexpose.md
  - k8s/shippingimages.md
  - k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
  - k8s/kubectlproxy.md
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
  - k8s/dashboard.md
- - k8s/kubectlscale.md
  # - k8s/scalingdockercoins.md
  # - shared/hastyconclusions.md
  - k8s/daemonset.md
  - k8s/rollout.md
  - k8s/namespaces.md
- - k8s/kustomize.md
  - k8s/helm.md
  - k8s/create-chart.md
  - k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
- - k8s/netpol.md
  - k8s/authn-authz.md
  - k8s/csr-api.md
  - k8s/podsecuritypolicy.md
- - k8s/ingress.md
  - k8s/gitworkflows.md
  - k8s/prometheus.md
- - k8s/volumes.md
  - k8s/build-with-docker.md
  - k8s/build-with-kaniko.md
  - k8s/configuration.md
- - k8s/owners-and-dependents.md
  - k8s/extending-api.md
- - k8s/statefulsets.md
  - k8s/local-persistent-volumes.md
  - k8s/portworx.md
  - k8s/staticpods.md
- - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md
@@ -1,77 +0,0 @@
title: |
  Deploying and Scaling Microservices
  with Kubernetes

#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
chat: "In person!"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced

chapters:
- shared/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - k8s/versions-k8s.md
  - shared/sampleapp.md
  #- shared/composescale.md
  #- shared/hastyconclusions.md
  - shared/composedown.md
  - k8s/concepts-k8s.md
  - shared/declarative.md
  - k8s/declarative.md
  - k8s/kubenet.md
- - k8s/kubectlget.md
  - k8s/setup-k8s.md
  - k8s/kubectlrun.md
  - k8s/deploymentslideshow.md
  - k8s/kubectlexpose.md
- - k8s/shippingimages.md
  #- k8s/buildshiprun-selfhosted.md
  - k8s/buildshiprun-dockerhub.md
  - k8s/ourapponkube.md
  - k8s/kubectlproxy.md
  - k8s/localkubeconfig.md
  - k8s/accessinternal.md
  - k8s/dashboard.md
  #- k8s/kubectlscale.md
  - k8s/scalingdockercoins.md
  - shared/hastyconclusions.md
- - k8s/daemonset.md
  - k8s/rollout.md
  - k8s/namespaces.md
  - k8s/kustomize.md
  #- k8s/helm.md
  #- k8s/create-chart.md
- - k8s/healthchecks.md
  - k8s/logs-cli.md
  - k8s/logs-centralized.md
  #- k8s/netpol.md
  - k8s/authn-authz.md
  - k8s/csr-api.md
  - k8s/podsecuritypolicy.md
- - k8s/ingress.md
  #- k8s/gitworkflows.md
  - k8s/prometheus.md
- - k8s/volumes.md
  #- k8s/build-with-docker.md
  #- k8s/build-with-kaniko.md
  - k8s/configuration.md
  #- k8s/owners-and-dependents.md
  #- k8s/extending-api.md
- - k8s/statefulsets.md
  - k8s/local-persistent-volumes.md
  - k8s/portworx.md
  #- k8s/staticpods.md
- - k8s/whatsnext.md
  - k8s/links.md
  - shared/thankyou.md
@@ -1,32 +1,30 @@
## Intros

- This slide should be customized by the tutorial instructor(s).

- Hello! We are:

  - .emoji[👩🏻‍🏫] Ann O'Nymous ([@...](https://twitter.com/...), Megacorp Inc)

  - .emoji[👨🏾‍🎓] Stu Dent ([@...](https://twitter.com/...), University of Wakanda)

<!-- .dummy[

  - .emoji[👷🏻‍♀️] AJ ([@s0ulshake](https://twitter.com/s0ulshake), Travis CI)

  - .emoji[🚁] Alexandre ([@alexbuisine](https://twitter.com/alexbuisine), Enix SAS)
  - .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Tiny Shell Script LLC)

  - .emoji[🐳] Jérôme ([@jpetazzo](https://twitter.com/jpetazzo), Enix SAS)
--

  - .emoji[⛵] Jérémy ([@jeremygarrouste](https://twitter.com/jeremygarrouste), Inpiwee)
- Take a moment to introduce yourself to the person(s) next to you!

  - .emoji[🎧] Romain ([@rdegez](https://twitter.com/rdegez), Enix SAS)
- What company or organization are you from?

] -->
- What are you looking to learn from this training?

- The workshop will run from ...
---

- There will be a lunch break at ...
## Logistics

(And coffee breaks!)
- The tutorial will run from 9am to 5pm both days

- Morning breaks will be at 10:30am in East Lobby / Almaden Foyer

- Lunch will be served at 12:30pm in Grand Ballroom Foyer

- Afternoon breaks will be at 3pm in East Lobby / Almaden Foyer

- Feel free to interrupt for questions at any time
@@ -9,3 +9,23 @@ class: title, in-person
That's all, folks! <br/> Questions?

![end](images/end.jpg)

---

## Final words

- **Please rate this session!** (with [this link](https://conferences.oreilly.com/velocity/vl-ca/public/schedule/detail/75313))

- If you liked this format, I can also teach the following courses:

  - [two-day Kubernetes bootstrap](https://tinyshellscript.com/kubernetes-bootstrap.html)

  - [four-day Kubernetes administrator training](https://tinyshellscript.com/kubernetes-ops-week.html)

  - custom courses of any length, covering Docker, Swarm, Kubernetes

- If you want me to train your team, contact me:

  jerome.petazzoni@gmail.com

.footnote[*Thank you!*]
@@ -1,65 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced
- snap
- btp-auto
- benchmarking
- elk-manual
- prom-manual

chapters:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - swarm/versions.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - swarm/swarmkit.md
  - shared/declarative.md
  - swarm/swarmmode.md
  - swarm/creatingswarm.md
  #- swarm/machine.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  - swarm/healthchecks.md
- - swarm/operatingswarm.md
  - swarm/netshoot.md
  - swarm/ipsec.md
  - swarm/swarmtools.md
  - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
- - swarm/logging.md
  - swarm/metrics.md
  - swarm/gui.md
  - swarm/stateful.md
  - swarm/extratips.md
  - shared/thankyou.md
  - swarm/links.md
@@ -1,64 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- self-paced
- snap
- btp-manual
- benchmarking
- elk-manual
- prom-manual

chapters:
- shared/title.md
- logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - swarm/versions.md
  - shared/sampleapp.md
  - shared/composescale.md
  - shared/hastyconclusions.md
  - shared/composedown.md
  - swarm/swarmkit.md
  - shared/declarative.md
  - swarm/swarmmode.md
  - swarm/creatingswarm.md
  #- swarm/machine.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  #- swarm/hostingregistry.md
  #- swarm/testingregistry.md
  #- swarm/btp-manual.md
  #- swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
  - swarm/updatingservices.md
  #- swarm/rollingupdates.md
  #- swarm/healthchecks.md
- - swarm/operatingswarm.md
  #- swarm/netshoot.md
  #- swarm/ipsec.md
  #- swarm/swarmtools.md
  - swarm/security.md
  #- swarm/secrets.md
  #- swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  - swarm/logging.md
  - swarm/metrics.md
  #- swarm/stateful.md
  #- swarm/extratips.md
  - shared/thankyou.md
  - swarm/links.md
@@ -1,73 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- in-person
- btp-auto

chapters:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
  - swarm/cicd.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
  - swarm/netshoot.md
  - swarm/swarmnbt.md
  - swarm/ipsec.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  - swarm/healthchecks.md
  - swarm/nodeinfo.md
  - swarm/swarmtools.md
- - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  - swarm/logging.md
  - swarm/metrics.md
  - swarm/stateful.md
  - swarm/extratips.md
  - shared/thankyou.md
  - swarm/links.md
@@ -1,72 +0,0 @@
title: |
  Container Orchestration
  with Docker and Swarm

chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"

gitrepo: github.com/jpetazzo/container.training

slides: http://container.training/

exclude:
- in-person
- btp-auto

chapters:
- shared/title.md
#- shared/logistics.md
- swarm/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
  - shared/connecting.md
  - swarm/versions.md
- |
  name: part-1

  class: title, self-paced

  Part 1
- shared/sampleapp.md
- shared/composescale.md
- shared/hastyconclusions.md
- shared/composedown.md
- swarm/swarmkit.md
- shared/declarative.md
- swarm/swarmmode.md
- swarm/creatingswarm.md
#- swarm/machine.md
- swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/hostingregistry.md
  - swarm/testingregistry.md
  - swarm/btp-manual.md
  - swarm/swarmready.md
  - swarm/stacks.md
- |
  name: part-2

  class: title, self-paced

  Part 2
- - swarm/operatingswarm.md
  #- swarm/netshoot.md
  #- swarm/swarmnbt.md
  - swarm/ipsec.md
  - swarm/updatingservices.md
  - swarm/rollingupdates.md
  #- swarm/healthchecks.md
  - swarm/nodeinfo.md
  - swarm/swarmtools.md
- - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
  #- swarm/logging.md
  #- swarm/metrics.md
  - swarm/stateful.md
  - swarm/extratips.md
  - shared/thankyou.md
  - swarm/links.md