Compare commits


10 Commits

Author · SHA1 · Message · Date
Jerome Petazzoni · 8ef6219295 · fix-redirects.sh: adding forced redirect · 2020-04-07 16:48:42 -05:00
Bridget Kromhout · 346ce0e15c · Merge pull request #304 from bridgetkromhout/devopsdaysmsp2018 (testing changes for 90min) · 2018-07-10 18:03:16 -05:00
Bridget Kromhout · 964d936435 · Merge branch 'devopsdaysmsp2018' into devopsdaysmsp2018 · 2018-07-10 07:47:09 -05:00
Bridget Kromhout · 546d9a2986 · Testing redirect · 2018-07-10 07:45:55 -05:00
Bridget Kromhout · 8e5d27b185 · changing redirects back · 2018-07-10 07:40:49 -05:00
Bridget Kromhout · e8d9e94b72 · First pass at edits for 90min workshop · 2018-07-10 07:37:39 -05:00
Bridget Kromhout · ca980de2fd · Merge branch 'master' of github.com:bridgetkromhout/container.training into devopsdaysmsp2018 · 2018-07-10 07:36:05 -05:00
Bridget Kromhout · 4b2b5ff7e4 · Merge pull request #303 from jpetazzo/master (bringing branch up to date) · 2018-07-10 07:34:08 -05:00
Bridget Kromhout · 64fb407e8c · Merge pull request #299 from bridgetkromhout/devopsdaysmsp2018 (devopsdays MSP 2018-specific stuff) · 2018-07-06 16:04:20 -05:00
Bridget Kromhout · ea4f46599d · Adding devopsdays MSP 2018 · 2018-07-06 16:02:02 -05:00
108 changed files with 547 additions and 2373 deletions

View File

@@ -1,222 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    k8s-app: fluentd-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:elasticsearch
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        # X-Pack Authentication
        # =====================
        - name: FLUENT_ELASTICSEARCH_USER
          value: "elastic"
        - name: FLUENT_ELASTICSEARCH_PASSWORD
          value: "changeme"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: elasticsearch
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5.6.8
        imagePullPolicy: IfNotPresent
        name: elasticsearch
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    run: elasticsearch
  sessionAffinity: None
  type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: kibana
  name: kibana
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: kibana
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5.6.8
        imagePullPolicy: Always
        name: kibana
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: kibana
  name: kibana
  selfLink: /api/v1/namespaces/default/services/kibana
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    run: kibana
  sessionAffinity: None
  type: NodePort

View File

@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

View File

@@ -1,167 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

View File

@@ -1,14 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl

View File

@@ -1,10 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress: []

View File

@@ -1,67 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      run: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: socat
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}

View File

@@ -93,7 +93,7 @@ wrap Run this program in a container
- The `./workshopctl` script can be executed directly.
- It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. For now, this is hard-coded.
### Example Steps to Launch a Batch of AWS Instances for a Workshop

View File

@@ -85,7 +85,7 @@ img {
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
<tr><td class="logpass">training</td></tr>
</table>
</p>

View File

@@ -168,22 +168,6 @@ _cmd_kube() {
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
fi"
# Install stern
pssh "
if [ ! -x /usr/local/bin/stern ]; then
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
sep "Done"
}

View File

@@ -13,7 +13,6 @@ COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]
#################################
@@ -55,9 +54,9 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))
ipv4 = open("/tmp/ipv4").read()
# Add a "docker" user with password coming from the settings
# Add a "docker" user with password "training"
system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))
system("echo docker:training | sudo chpasswd")
# Fancy prompt courtesy of @soulshake.
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL

View File

@@ -22,6 +22,3 @@ engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -85,7 +85,7 @@ img {
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
<tr><td class="logpass">training</td></tr>
</table>
</p>

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

slides/_redirects (new file, 2 lines)
View File

@@ -0,0 +1,2 @@
/ /kube-90min.yml.html 200!

View File

@@ -29,10 +29,6 @@ class State(object):
self.interactive = True
self.verify_status = False
self.simulate_type = True
self.switch_desktop = False
self.sync_slides = False
self.open_links = False
self.run_hidden = True
self.slide = 1
self.snippet = 0
@@ -41,10 +37,6 @@ class State(object):
self.interactive = bool(data["interactive"])
self.verify_status = bool(data["verify_status"])
self.simulate_type = bool(data["simulate_type"])
self.switch_desktop = bool(data["switch_desktop"])
self.sync_slides = bool(data["sync_slides"])
self.open_links = bool(data["open_links"])
self.run_hidden = bool(data["run_hidden"])
self.slide = int(data["slide"])
self.snippet = int(data["snippet"])
@@ -54,10 +46,6 @@ class State(object):
interactive=self.interactive,
verify_status=self.verify_status,
simulate_type=self.simulate_type,
switch_desktop=self.switch_desktop,
sync_slides=self.sync_slides,
open_links=self.open_links,
run_hidden=self.run_hidden,
slide=self.slide,
snippet=self.snippet,
), f, default_flow_style=False)
@@ -134,20 +122,14 @@ class Slide(object):
def focus_slides():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "3"])
subprocess.check_output(["i3-msg", "workspace", "1"])
def focus_terminal():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "2"])
subprocess.check_output(["i3-msg", "workspace", "1"])
def focus_browser():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "4"])
subprocess.check_output(["i3-msg", "workspace", "1"])
@@ -325,21 +307,17 @@ while True:
slide = slides[state.slide]
snippet = slide.snippets[state.snippet-1] if state.snippet else None
click.clear()
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}]"
.format(state.slide, len(slides)-1,
state.snippet, len(slide.snippets) if slide.snippets else 0,
state.simulate_type, state.verify_status,
state.switch_desktop, state.sync_slides,
state.open_links, state.run_hidden))
state.simulate_type, state.verify_status))
print(hrule())
if snippet:
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
focus_terminal()
else:
print(slide.content)
if state.sync_slides:
subprocess.check_output(["./gotoslide.js", str(slide.number)])
subprocess.check_output(["./gotoslide.js", str(slide.number)])
focus_slides()
print(hrule())
if state.interactive:
@@ -348,10 +326,6 @@ while True:
print("n/→ Next")
print("s Simulate keystrokes")
print("v Validate exit status")
print("d Switch desktop")
print("k Sync slides")
print("o Open links")
print("h Run hidden commands")
print("g Go to a specific slide")
print("q Quit")
print("c Continue non-interactively until next error")
@@ -367,14 +341,6 @@ while True:
state.simulate_type = not state.simulate_type
elif command == "v":
state.verify_status = not state.verify_status
elif command == "d":
state.switch_desktop = not state.switch_desktop
elif command == "k":
state.sync_slides = not state.sync_slides
elif command == "o":
state.open_links = not state.open_links
elif command == "h":
state.run_hidden = not state.run_hidden
elif command == "g":
state.slide = click.prompt("Enter slide number", type=int)
state.snippet = 0
@@ -400,7 +366,7 @@ while True:
logging.info("Running with method {}: {}".format(method, data))
if method == "keys":
send_keys(data)
elif method == "bash" or (method == "hide" and state.run_hidden):
elif method == "bash":
# Make sure that we're ready
wait_for_prompt()
# Strip leading spaces
@@ -439,12 +405,11 @@ while True:
screen = capture_pane()
url = data.replace("/node1", "/{}".format(IPADDR))
# This should probably be adapted to run on different OS
if state.open_links:
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
else:
logging.warning("Unknown method {}: {!r}".format(method, data))
move_forward()

View File

@@ -1 +0,0 @@
click

slides/common/prereqs.md (new file, 41 lines)
View File

@@ -0,0 +1,41 @@
## Hands-on
- All hands-on sections are clearly identified, like the gray rectangle below
.exercise[
- This is the stuff you're supposed to do!
- Go to @@SLIDES@@ to view these slides
]
- Each person gets a private cluster of cloud VMs (not shared with anybody else)
- All you need is a computer (or even a phone or tablet!), with:
- an internet connection
- a web browser
- an SSH client
---
class: in-person
## Connecting to our lab environment
.exercise[
- Log into the first VM (`node1`) with your SSH client
- Check that you can SSH (without password) to `node2`:
```bash
ssh node2
```
- Type `exit` or `^D` to come back to `node1`
]
If anything goes wrong — ask for help!

View File

@@ -8,9 +8,8 @@
<!--
```bash
cd ~
if [ -d container.training ]; then
  mv container.training container.training.$RANDOM
  mv container.training container.training.$$
fi
```
-->
@@ -95,61 +94,6 @@ class: extra-details
---
## Service discovery in container-land
- We do not hard-code IP addresses in the code
- We do not hard-code FQDN in the code, either
- We just connect to a service name, and container-magic does the rest
(And by container-magic, we mean "a crafty, dynamic, embedded DNS server")
---
## Example in `worker/worker.py`
```python
redis = Redis("`redis`")
def get_random_bytes():
    r = requests.get("http://`rng`/32")
    return r.content

def hash_bytes(data):
    r = requests.post("http://`hasher`/",
                      data=data,
                      headers={"Content-Type": "application/octet-stream"})
```
(Full source code available [here](
https://@@GITREPO@@/blob/8279a3bce9398f7c1a53bdd95187c53eda4e6435/dockercoins/worker/worker.py#L17
))
---
class: extra-details
## Links, naming, and service discovery
- Containers can have network aliases (resolvable through DNS)
- Compose file version 2+ makes each container reachable through its service name
- Compose file version 1 did require "links" sections
- Network aliases are automatically namespaced
- you can have multiple apps declaring and using a service named `database`
- containers in the blue app will resolve `database` to the IP of the blue database
- containers in the green app will resolve `database` to the IP of the green database
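A quick way to observe this behavior outside of Compose (a hedged sketch using plain `docker` commands; the network and image names below are arbitrary, not part of the workshop):

```bash
# Containers on the same user-defined network resolve each other by alias
docker network create blue
docker run -d --network blue --network-alias database redis
docker run --rm --network blue alpine nslookup database
# Another project could create its own "green" network with its own
# "database" alias; the two would not conflict
```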
---
## What's this application?
--

View File

@@ -113,13 +113,7 @@ for item in items:
1: "st", 2: "nd", 3: "rd",
21: "st", 22: "nd", 23: "rd",
31: "st"}.get(date.day, "th")
# %e is a non-standard extension (it displays the day, but without a
# leading zero). If strftime fails with ValueError, try to fall back
# on %d (which displays the day but with a leading zero when needed).
try:
item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
except ValueError:
item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)
item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
today = datetime.date.today()
coming_soon = [i for i in items if i.get("date") and i["date"] >= today]

View File

@@ -1,50 +1,9 @@
- date: 2018-11-23
city: Copenhagen
country: dk
event: GOTO
title: Build Container Orchestration with Docker Swarm
speaker: bretfisher
attend: https://gotocph.com/2018/workshops/121
- date: 2018-11-08
city: San Francisco, CA
country: us
event: QCON
title: Introduction to Docker and Containers
speaker: jpetazzo
attend: https://qconsf.com/sf2018/workshop/introduction-docker-and-containers
- date: 2018-11-09
city: San Francisco, CA
country: us
event: QCON
title: Getting Started With Kubernetes and Container Orchestration
speaker: jpetazzo
attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration
- date: 2018-10-31
city: London, UK
country: uk
event: Velocity EU
title: Kubernetes 101
speaker: bridgetkromhout
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71149
- date: 2018-10-30
city: London, UK
country: uk
event: Velocity EU
title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
speaker: bretfisher
attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71231
- date: 2018-07-12
city: Minneapolis, MN
country: us
event: devopsdays Minneapolis
title: Kubernetes 101
speaker: "ashleymcnamara, bketelsen"
slides: https://devopsdaysmsp2018.container.training
attend: https://www.devopsdays.org/events/2018-minneapolis/registration/
- date: 2018-10-01
@@ -63,30 +22,12 @@
speaker: jpetazzo
attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/69875
- date: 2018-09-30
city: New York, NY
country: us
event: Velocity
title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
speaker: bretfisher
attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/70147
- date: 2018-09-17
country: fr
city: Paris
event: ENIX SAS
speaker: jpetazzo
title: Déployer ses applications avec Kubernetes (in French)
lang: fr
attend: https://enix.io/fr/services/formation/deployer-ses-applications-avec-kubernetes/
- date: 2018-07-17
city: Portland, OR
country: us
event: OSCON
title: Kubernetes 101
speaker: bridgetkromhout
slides: https://oscon2018.container.training/
attend: https://conferences.oreilly.com/oscon/oscon-or/public/schedule/detail/66287
- date: 2018-06-27

View File

@@ -13,47 +13,47 @@ exclude:
- self-paced
chapters:
- shared/title.md
- common/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- intro/Docker_Machine.md
- - intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- common/thankyou.md
- intro/links.md

View File

@@ -13,47 +13,47 @@ exclude:
- in-person
chapters:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
- common/title.md
# - common/logistics.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- intro/Docker_Machine.md
- - intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- common/thankyou.md
- intro/links.md

View File

@@ -312,7 +312,7 @@ CMD gunicorn --bind 0.0.0.0:5000 --workers 10 counter:app
EXPOSE 5000
```
(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
---

View File

@@ -1,179 +0,0 @@
# Accessing the API with `kubectl proxy`
- The API requires us to authenticate.red[¹]
- There are many authentication methods available, including:
- TLS client certificates
<br/>
(that's what we've used so far)
- HTTP basic password authentication
<br/>
(from a static file; not recommended)
- various token mechanisms
<br/>
(detailed in the [documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#authentication-strategies))
.red[¹]OK, we lied. If you don't authenticate, you are considered to
be user `system:anonymous`, which doesn't have any access rights by default.
---
## Accessing the API directly
- Let's see what happens if we try to access the API directly with `curl`
.exercise[
- Retrieve the ClusterIP allocated to the `kubernetes` service:
```bash
kubectl get svc kubernetes
```
- Replace the IP below and try to connect with `curl`:
```bash
curl -k https://`10.96.0.1`/
```
]
The API will tell us that user `system:anonymous` cannot access this path.
---
## Authenticating to the API
If we wanted to talk to the API, we would need to:
- extract our TLS key and certificate information from `~/.kube/config`
(the information is in PEM format, encoded in base64)
- use that information to present our certificate when connecting
(for instance, with `openssl s_client -key ... -cert ... -connect ...`)
- figure out exactly which credentials to use
(once we start juggling multiple clusters)
- change that whole process if we're using another authentication method
🤔 There has to be a better way!
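For reference, here is a minimal sketch of that manual process. It assumes a kubeconfig that uses client certificates and simply takes the first user and cluster entries; adapt as needed:

```bash
# Extract the client certificate and key from the kubeconfig
# (assumes the first "user" entry is ours and uses certificate-based auth)
kubectl config view --raw -o jsonpath='{.users[0].user.client-certificate-data}' | base64 -d > client.crt
kubectl config view --raw -o jsonpath='{.users[0].user.client-key-data}' | base64 -d > client.key
API=$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.server}')
# Present the certificate when connecting (-k skips server cert verification)
curl -k --cert client.crt --key client.key $API/api/v1/namespaces
```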
---
## Using `kubectl proxy` for authentication
- `kubectl proxy` runs a proxy in the foreground
- This proxy lets us access the Kubernetes API without authentication
(`kubectl proxy` adds our credentials on the fly to the requests)
- This proxy lets us access the Kubernetes API over plain HTTP
- This is a great tool to learn and experiment with the Kubernetes API
- ... And for serious usages as well (suitable for one-shot scripts)
- For unattended use, it is better to create a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
---
## Trying `kubectl proxy`
- Let's start `kubectl proxy` and then do a simple request with `curl`!
.exercise[
- Start `kubectl proxy` in the background:
```bash
kubectl proxy &
```
- Access the API's default route:
```bash
curl localhost:8001
```
- Terminate the proxy:
```bash
kill %1
```
]
The output is a list of available API routes.
---
## `kubectl proxy` is intended for local use
- By default, the proxy listens on port 8001
(But this can be changed, or we can tell `kubectl proxy` to pick a port)
- By default, the proxy binds to `127.0.0.1`
(Making it unreachable from other machines, for security reasons)
- By default, the proxy only accepts connections from:
`^localhost$,^127\.0\.0\.1$,^\[::1\]$`
- This is great when running `kubectl proxy` locally
- Not-so-great when you want to connect to the proxy from a remote machine
---
## Running `kubectl proxy` on a remote machine
- If we wanted to connect to the proxy from another machine, we would need to:
- bind to `INADDR_ANY` instead of `127.0.0.1`
- accept connections from any address
- This is achieved with:
```
kubectl proxy --port=8888 --address=0.0.0.0 --accept-hosts=.*
```
.warning[Do not do this on a real cluster: it opens full unauthenticated access!]
---
## Security considerations
- Running `kubectl proxy` openly is a huge security risk
- It is slightly better to run the proxy where you need it
(and copy credentials, e.g. `~/.kube/config`, to that place)
- It is even better to use a limited account with reduced permissions
---
## Good to know ...
- `kubectl proxy` also gives access to all internal services
- Specifically, services are exposed as such:
```
/api/v1/namespaces/<namespace>/services/<service>/proxy
```
- We can use `kubectl proxy` to access an internal service in a pinch
(or, for non HTTP services, `kubectl port-forward`)
- This is not very useful when running `kubectl` directly on the cluster
(since we could connect to the services directly anyway)
- But it is very powerful as soon as you run `kubectl` from a remote machine
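For example, with the proxy from the earlier exercise still running, an internal service could be reached like this (a sketch; `kibana` and port `5601` stand in for whatever service actually exists in your cluster):

```bash
# Assumes `kubectl proxy` is listening on its default port 8001,
# and that a Service named "kibana" exists in the "default" namespace
curl localhost:8001/api/v1/namespaces/default/services/kibana:5601/proxy/
```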

View File

@@ -1,20 +0,0 @@
# Links and resources
All things Kubernetes:
- [Kubernetes Community](https://kubernetes.io/community/) - Slack, Google Groups, meetups
- [Kubernetes on StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes)
- [Play With Kubernetes Hands-On Labs](https://medium.com/@marcosnils/introducing-pwk-play-with-k8s-159fcfeb787b)
All things Docker:
- [Docker documentation](http://docs.docker.com/)
- [Docker Hub](https://hub.docker.com)
- [Docker on StackOverflow](https://stackoverflow.com/questions/tagged/docker)
- [Play With Docker Hands-On Labs](http://training.play-with-docker.com/)
Everything else:
- [Local meetups](https://www.meetup.com/)
.footnote[These slides (and future updates) are on → http://container.training/]

View File

@@ -1,268 +0,0 @@
# Network policies
- Namespaces help us to *organize* resources
- Namespaces do not provide isolation
- By default, every pod can contact every other pod
- By default, every service accepts traffic from anyone
- If we want this to be different, we need *network policies*
---
## What's a network policy?
A network policy is defined by the following things.
- A *pod selector* indicating which pods it applies to
e.g.: "all pods in namespace `blue` with the label `zone=internal`"
- A list of *ingress rules* indicating which inbound traffic is allowed
e.g.: "TCP connections to ports 8000 and 8080 coming from pods with label `zone=dmz`,
and from the external subnet 4.42.6.0/24, except 4.42.6.5"
- A list of *egress rules* indicating which outbound traffic is allowed
A network policy can provide ingress rules, egress rules, or both.
---
## How do network policies apply?
- A pod can be "selected" by any number of network policies
- If a pod isn't selected by any network policy, then its traffic is unrestricted
(In other words: in the absence of network policies, all traffic is allowed)
- If a pod is selected by at least one network policy, then all traffic is blocked ...
... unless it is explicitly allowed by one of these network policies
---
class: extra-details
## Traffic filtering is flow-oriented
- Network policies deal with *connections*, not individual packets
- Example: to allow HTTP (80/tcp) connections to pod A, you only need an ingress rule
(You do not need a matching egress rule to allow response traffic to go through)
- This also applies for UDP traffic
(Allowing DNS traffic can be done with a single rule)
- Network policy implementations use stateful connection tracking
---
## Pod-to-pod traffic
- Connections from pod A to pod B have to be allowed by both pods:
- pod A has to be unrestricted, or allow the connection as an *egress* rule
- pod B has to be unrestricted, or allow the connection as an *ingress* rule
- As a consequence: if a network policy restricts traffic going from/to a pod,
<br/>
the restriction cannot be overridden by a network policy selecting another pod
- This prevents an entity managing network policies in namespace A
(but without permission to do so in namespace B)
from adding network policies giving them access to namespace B
---
## The rationale for network policies
- In network security, it is generally considered better to "deny all, then allow selectively"
(The other approach, "allow all, then block selectively" makes it too easy to leave holes)
- As soon as one network policy selects a pod, the pod enters this "deny all" logic
- Further network policies can open additional access
- Good network policies should be scoped as precisely as possible
- In particular: make sure that the selector is not too broad
(Otherwise, you end up affecting pods that were otherwise well secured)
---
## Our first network policy
This is our game plan:
- run a web server in a pod
- create a network policy to block all access to the web server
- create another network policy to allow access only from specific pods
---
## Running our test web server
.exercise[
- Let's use the `nginx` image:
```bash
kubectl run testweb --image=nginx
```
- Find out the IP address of the pod with one of these two commands:
```bash
kubectl get pods -o wide -l run=testweb
IP=$(kubectl get pods -l run=testweb -o json | jq -r .items[0].status.podIP)
```
- Check that we can connect to the server:
```bash
curl $IP
```
]
The `curl` command should show us the "Welcome to nginx!" page.
---
## Adding a very restrictive network policy
- The policy will select pods with the label `run=testweb`
- It will specify an empty list of ingress rules (matching nothing)
.exercise[
- Apply the policy in this YAML file:
```bash
kubectl apply -f ~/container.training/k8s/netpol-deny-all-for-testweb.yaml
```
- Check if we can still access the server:
```bash
curl $IP
```
]
The `curl` command should now time out.
---
## Looking at the network policy
This is the file that we applied:
```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress: []
```
---
## Allowing connections only from specific pods
- We want to allow traffic from pods with the label `run=testcurl`
- Reminder: this label is automatically applied when we do `kubectl run testcurl ...`
.exercise[
- Apply another policy:
```bash
kubectl apply -f ~/container.training/k8s/netpol-allow-testcurl-for-testweb.yaml
```
]
---
## Looking at the network policy
This is the second file that we applied:
```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl
```
---
## Testing the network policy
- Let's create pods with, and without, the required label
.exercise[
- Try to connect to testweb from a pod with the `run=testcurl` label:
```bash
kubectl run testcurl --rm -i --image=centos -- curl -m3 $IP
```
- Try to connect to testweb with a different label:
```bash
kubectl run testkurl --rm -i --image=centos -- curl -m3 $IP
```
]
The first command will work (and show the "Welcome to nginx!" page).
The second command will fail and time out after 3 seconds.
(The timeout is obtained with the `-m3` option.)
---
## An important warning
- Some network plugins only have partial support for network policies
- For instance, Weave [doesn't support ipBlock (yet)](https://github.com/weaveworks/weave/issues/3168)
- Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018)
- Unsupported features might be silently ignored
(Making you believe that you are secure, when you're not)
---
## Further resources
- As always, the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) is a good starting point
- And two resources by [Ahmet Alp Balkan](https://ahmet.im/):
- a [very good talk about network policies](https://www.youtube.com/watch?list=PLj6h78yzYM2P-3-xqvmWaZbbI1sW-ulZb&v=3gGpMmYeEO8) at KubeCon North America 2017
- a repository of [ready-to-use recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) for network policies

slides/kube-90min.yml (new file, 52 lines)
View File

@@ -0,0 +1,52 @@
title: |
  Kubernetes 101
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
slides: http://container.training/
exclude:
- self-paced
- extra-details
chapters:
- common/title.md
- logistics.md
#- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- common/composescale.md
- common/composedown.md
- kube/concepts-k8s.md
# - common/declarative.md
- kube/declarative.md
# - kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- - kube/kubectlrun.md
- kube/kubectlexpose.md
- kube/ourapponkube.md
#- kube/kubectlproxy.md
- - kube/dashboard.md
- kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
# Stern is interesting but can be skipped
#- - kube/logs-cli.md
# Bridget hasn't added EFK yet
#- kube/logs-centralized.md
- kube/helm.md
- kube/namespaces.md
- kube/whatsnext.md
- kube/links.md
# Bridget-specific
# - kube/links-bridget.md
- common/thankyou.md

View File

@@ -14,35 +14,34 @@ exclude:
- self-paced
chapters:
- shared/title.md
- common/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
- shared/composedown.md
- - k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- - k8s/kubectlexpose.md
- k8s/ourapponkube.md
- k8s/kubectlproxy.md
- k8s/dashboard.md
- - k8s/kubectlscale.md
- k8s/daemonset.md
- k8s/rollout.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/helm.md
- k8s/namespaces.md
- k8s/netpol.md
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md
- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
#- common/composescale.md
- common/composedown.md
- - kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- kube/kubectlrun.md
- - kube/kubectlexpose.md
- kube/ourapponkube.md
- kube/kubectlproxy.md
- kube/dashboard.md
- - kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
#- kube/logs-cli.md
#- kube/logs-centralized.md
#- kube/helm.md
#- kube/namespaces.md
- kube/whatsnext.md
- kube/links.md
- common/thankyou.md

View File

@@ -13,41 +13,38 @@ exclude:
- self-paced
chapters:
- shared/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- common/title.md
- logistics.md
- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- - k8s/kubectlrun.md
- k8s/kubectlexpose.md
- k8s/ourapponkube.md
#- k8s/kubectlproxy.md
- - k8s/dashboard.md
- k8s/kubectlscale.md
- k8s/daemonset.md
- k8s/rollout.md
- - k8s/logs-cli.md
#- common/composescale.md
- common/composedown.md
- kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- - kube/kubectlrun.md
- kube/kubectlexpose.md
- kube/ourapponkube.md
#- kube/kubectlproxy.md
- - kube/dashboard.md
- kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
- - kube/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/helm.md
- k8s/namespaces.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
#- kube/logs-centralized.md
- kube/helm.md
- kube/namespaces.md
- kube/whatsnext.md
# - kube/links.md
# Bridget-specific
- k8s/links-bridget.md
- shared/thankyou.md
- kube/links-bridget.md
- common/thankyou.md

View File

@@ -13,35 +13,34 @@ exclude:
- in-person
chapters:
- shared/title.md
- common/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composescale.md
- shared/composedown.md
- - k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- - k8s/kubectlexpose.md
- k8s/ourapponkube.md
- k8s/kubectlproxy.md
- k8s/dashboard.md
- - k8s/kubectlscale.md
- k8s/daemonset.md
- k8s/rollout.md
- - k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/helm.md
- k8s/namespaces.md
- k8s/netpol.md
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md
- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
- common/composescale.md
- common/composedown.md
- - kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- kube/kubectlrun.md
- - kube/kubectlexpose.md
- kube/ourapponkube.md
- kube/kubectlproxy.md
- kube/dashboard.md
- - kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
- - kube/logs-cli.md
- kube/logs-centralized.md
- kube/helm.md
- kube/namespaces.md
- kube/whatsnext.md
- kube/links.md
- common/thankyou.md

View File

@@ -161,7 +161,7 @@ class: pic
(This is illustrated on the first "super complicated" schema)
- In some hosted Kubernetes offerings (e.g. AKS, GKE, EKS), the control plane is invisible
- In some hosted Kubernetes offerings (e.g. GKE), the control plane is invisible
(We only "see" a Kubernetes API endpoint)
@@ -171,11 +171,7 @@ class: pic
---
## Do we need to run Docker at all?
No!
--
## Default container runtime
- By default, Kubernetes uses the Docker Engine to run containers
@@ -185,42 +181,6 @@ No!
(like CRI-O, or containerd)
---
## Do we need to run Docker at all?
Yes!
--
- In this workshop, we run our app on a single node first
- We will need to build images and ship them around
- We can do these things without Docker
<br/>
(and get diagnosed with NIH¹ syndrome)
- Docker is still the most stable container engine today
<br/>
(but other options are maturing very quickly)
.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]
---
## Do we need to run Docker at all?
- On our development environments, CI pipelines ... :
*Yes, almost certainly*
- On our production servers:
*Yes (today)*
*Probably not (in the future)*
.footnote[More information about CRI [on the Kubernetes blog](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes)]
---
@@ -235,40 +195,15 @@ Yes!
- node (a machine — physical or virtual — in our cluster)
- pod (group of containers running together on a node)
- IP addresses are associated with *pods*, not with individual containers
- service (stable network endpoint to connect to one or multiple containers)
- namespace (more-or-less isolated group of things)
- secret (bundle of sensitive data to be passed to a container)
And much more!
- We can see the full list by running `kubectl api-resources`
(In Kubernetes 1.10 and prior, the command to list API resources was `kubectl get`)
And much more! (We can see the full list by running `kubectl get`)
---
class: pic
![Node, pod, container](images/k8s-arch3-thanks-weave.png)
---
class: pic
![One of the best Kubernetes architecture diagrams available](images/k8s-arch4-thanks-luxas.png)
---
## Credits
- The first diagram is courtesy of Weave Works
- a *pod* can have multiple containers working together
- IP addresses are associated with *pods*, not with individual containers
- The second diagram is courtesy of Lucas Käldström, in [this presentation](https://speakerdeck.com/luxas/kubeadm-cluster-creation-internals-from-self-hosting-to-upgradability-and-ha)
- it's one of the best Kubernetes architecture diagrams available!
Both diagrams used with permission.

View File

@@ -95,21 +95,10 @@ Note: `--export` will remove "cluster-specific" information, i.e.:
- Change `kind: Deployment` to `kind: DaemonSet`
<!--
```bash vim rng.yml```
```wait kind: Deployment```
```keys /Deployment```
```keys ^J```
```keys cwDaemonSet```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- Save, quit
- Try to create our new resource:
```
```bash
kubectl apply -f rng.yml
```
@@ -141,7 +130,6 @@ We all knew this couldn't be that easy, right!
- remove the `replicas` field
- remove the `strategy` field (which defines the rollout mechanism for a deployment)
- remove the `progressDeadlineSeconds` field (also used by the rollout mechanism)
- remove the `status: {}` line at the end
--
@@ -224,34 +212,6 @@ daemonset.apps/rng 2 2 2 2 2 <none>
---
## Too many pods
- If we check with `kubectl get pods`, we see:
- *one pod* for the deployment (named `rng-xxxxxxxxxx-yyyyy`)
- *one pod per node* for the daemon set (named `rng-zzzzz`)
```
NAME READY STATUS RESTARTS AGE
rng-54f57d4d49-7pt82 1/1 Running 0 11m
rng-b85tm 1/1 Running 0 25s
rng-hfbrr 1/1 Running 0 25s
[...]
```
--
The daemon set created one pod per node, except on the master node.
The master node has [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) preventing pods from running there.
(To schedule a pod on this node anyway, the pod will require appropriate [tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/).)
.footnote[(Off by one? We don't run these pods on the node hosting the control plane.)]
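If you want to see that taint for yourself, you can inspect the node running the control plane (assuming it is `node1`, as in our lab environments):

```bash
# Show the taints on the control plane node
kubectl describe node node1 | grep -i taints
# Expected output looks like: Taints: node-role.kubernetes.io/master:NoSchedule
```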
---
## What are all these pods doing?
- Let's check the logs of all these `rng` pods
@@ -356,186 +316,6 @@ The replica set selector also has a `pod-template-hash`, unlike the pods in our
---
# Updating a service through labels and selectors
- What if we want to drop the `rng` deployment from the load balancer?
- Option 1:
- destroy it
- Option 2:
- add an extra *label* to the daemon set
- update the service *selector* to refer to that *label*
--
Of course, option 2 offers more learning opportunities. Right?
---
## Add an extra label to the daemon set
- We will update the daemon set "spec"
- Option 1:
- edit the `rng.yml` file that we used earlier
- load the new definition with `kubectl apply`
- Option 2:
- use `kubectl edit`
--
*If you feel like you got this💕🌈, feel free to try directly.*
*We've included a few hints on the next slides for your convenience!*
---
## We've put resources in your resources
- Reminder: a daemon set is a resource that creates more resources!
- There is a difference between:
- the label(s) of a resource (in the `metadata` block in the beginning)
- the selector of a resource (in the `spec` block)
- the label(s) of the resource(s) created by the first resource (in the `template` block)
- You need to update the selector and the template (metadata labels are not mandatory)
- The template must match the selector
(i.e. the resource will refuse to create resources that it will not select)
---
## Adding our label
- Let's add a label `isactive: yes`
- In YAML, `yes` should be quoted; i.e. `isactive: "yes"`
.exercise[
- Update the daemon set to add `isactive: "yes"` to the selector and template label:
```bash
kubectl edit daemonset rng
```
<!--
```wait Please edit the object below```
```keys /run: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys /run: rng```
```keys ^J```
```keys oisactive: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- Update the service to add `isactive: "yes"` to its selector:
```bash
kubectl edit service rng
```
<!--
```wait Please edit the object below```
```keys /run: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
]
---
## Checking what we've done
.exercise[
- Check the most recent log line of all `run=rng` pods to confirm that exactly one per node is now active:
```bash
kubectl logs -l run=rng --tail 1
```
]
The timestamps should give us a hint about how many pods are currently receiving traffic.
.exercise[
- Look at the pods that we have right now:
```bash
kubectl get pods
```
]
---
## Cleaning up
- The pods of the deployment and the "old" daemon set are still running
- We are going to identify them programmatically
.exercise[
- List the pods with `run=rng` but without `isactive=yes`:
```bash
kubectl get pods -l run=rng,isactive!=yes
```
- Remove these pods:
```bash
kubectl delete pods -l run=rng,isactive!=yes
```
]
---
## Cleaning up stale pods
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
rng-54f57d4d49-7pt82 1/1 Terminating 0 51m
rng-54f57d4d49-vgz9h 1/1 Running 0 22s
rng-b85tm 1/1 Terminating 0 39m
rng-hfbrr 1/1 Terminating 0 39m
rng-vplmj 1/1 Running 0 7m
rng-xbpvg 1/1 Running 0 7m
[...]
```
- The extra pods (noted `Terminating` above) are going away
- ... But a new one (`rng-54f57d4d49-vgz9h` above) was restarted immediately!
--
- Remember, the *deployment* still exists, and makes sure that one pod is up and running
- If we delete the pod associated to the deployment, it is recreated automatically
---
## Deleting a deployment
.exercise[
@@ -560,63 +340,3 @@ rng-xbpvg 1/1 Running 0 11m
```
Ding, dong, the deployment is dead! And the daemon set lives on.
---
## Avoiding extra pods
- When we changed the definition of the daemon set, it immediately created new pods. We had to remove the old ones manually.
- How could we have avoided this?
--
- By adding the `isactive: "yes"` label to the pods before changing the daemon set!
- This can be done programmatically with `kubectl patch`:
```bash
PATCH='
metadata:
  labels:
    isactive: "yes"
'
kubectl get pods -l run=rng -l controller-revision-hash -o name |
xargs kubectl patch -p "$PATCH"
```
---
## Labels and debugging
- When a pod is misbehaving, we can delete it: another one will be recreated
- But we can also change its labels
- It will be removed from the load balancer (it won't receive traffic anymore)
- Another pod will be recreated immediately
- But the problematic pod is still here, and we can inspect and debug it
- We can even re-add it to the rotation if necessary
(Very useful to troubleshoot intermittent and elusive bugs)
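A sketch of that trick with the labels used in this chapter (the pod name is hypothetical; pick a real one from `kubectl get pods`):

```bash
POD=rng-xxxxx   # hypothetical pod name
# Remove the "isactive" label (the trailing dash deletes a label);
# the pod no longer matches the service selector and stops receiving traffic
kubectl label pod $POD isactive-
# ... inspect and debug the pod at leisure, then put it back into rotation:
kubectl label pod $POD isactive="yes"
```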
---
## Labels and advanced rollout control
- Conversely, we can add pods matching a service's selector
- These pods will then receive requests and serve traffic
- Examples:
- one-shot pod with all debug flags enabled, to collect logs
- pods created automatically, but added to rotation in a second step
<br/>
(by setting their label accordingly)
- This gives us building blocks for canary and blue/green deployments
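As an illustration (not part of the exercises), a one-shot pod can be added to the `rng` service's rotation just by giving it the right labels; the image and pod name below are placeholders:

```bash
# Launch a standalone pod carrying the labels that the rng service selects on
kubectl run rng-canary --restart=Never \
        --image=dockercoins/rng:v0.1 \
        --labels="run=rng,isactive=yes"
# It now receives its share of the traffic; remove it when done
kubectl delete pod rng-canary
```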

View File

@@ -10,9 +10,6 @@
3) bypass authentication for the dashboard
--
There is an additional step to make the dashboard available from outside (we'll get to that)
--
@@ -32,11 +29,15 @@ There is an additional step to make the dashboard available from outside (we'll
- Create all the dashboard resources, with the following command:
```bash
kubectl apply -f ~/container.training/k8s/kubernetes-dashboard.yaml
kubectl apply -f https://goo.gl/Qamqab
```
]
The goo.gl URL expands to:
<br/>
.small[https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml]
---
@@ -68,11 +69,15 @@ There is an additional step to make the dashboard available from outside (we'll
- Apply the convenient YAML file, and defeat SSL protection:
```bash
kubectl apply -f ~/container.training/k8s/socat.yaml
kubectl apply -f https://goo.gl/tA7GLz
```
]
The goo.gl URL expands to:
<br/>
.small[.small[https://gist.githubusercontent.com/jpetazzo/c53a28b5b7fdae88bc3c5f0945552c04/raw/da13ef1bdd38cc0e90b7a4074be8d6a0215e1a65/socat.yaml]]
.warning[All our dashboard traffic is now clear-text, including passwords!]
---
@@ -127,7 +132,7 @@ The dashboard will then ask you which authentication you want to use.
- Grant admin privileges to the dashboard so we can see our resources:
```bash
kubectl apply -f ~/container.training/k8s/grant-admin-to-dashboard.yaml
kubectl apply -f https://goo.gl/CHsLTA
```
- Reload the dashboard and enjoy!
@@ -140,68 +145,6 @@ The dashboard will then ask you which authentication you want to use.
---
## Exposing the dashboard over HTTPS
- We took a shortcut by forwarding HTTP to HTTPS inside the cluster
- Let's expose the dashboard over HTTPS!
- The dashboard is exposed through a `ClusterIP` service (internal traffic only)
- We will change that into a `NodePort` service (accepting outside traffic)
.exercise[
- Edit the service:
```
kubectl edit service kubernetes-dashboard
```
]
--
`NotFound`?!? Y U NO WORK?!?
---
## Editing the `kubernetes-dashboard` service
- If we look at the [YAML](https://github.com/jpetazzo/container.training/blob/master/k8s/kubernetes-dashboard.yaml) that we loaded before, we'll get a hint
--
- The dashboard was created in the `kube-system` namespace
--
.exercise[
- Edit the service:
```bash
kubectl -n kube-system edit service kubernetes-dashboard
```
- Change `ClusterIP` to `NodePort`, save, and exit
<!--
```wait Please edit the object below```
```keys /ClusterIP```
```keys ^J```
```keys cwNodePort```
```keys ^[ ``` ]
```keys :wq```
```keys ^J```
-->
- Check the port that was assigned with `kubectl -n kube-system get services`
- Connect to https://oneofournodes:3xxxx/ (yes, https)
]
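Alternatively (a sketch of the same change, without an interactive editor):
```bash
kubectl -n kube-system patch service kubernetes-dashboard \
        -p '{"spec":{"type":"NodePort"}}'
```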
---
## Running the Kubernetes dashboard securely
- The steps that we just showed you are *for educational purposes only!*

@@ -34,47 +34,27 @@
## Installing Helm
- If the `helm` CLI is not installed in your environment, install it
- We need to install the `helm` CLI; then use it to deploy `tiller`
.exercise[
- Check if `helm` is installed:
```bash
helm
```
- If it's not installed, run the following command:
- Install the `helm` CLI:
```bash
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
```
]
---
## Installing Tiller
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
- They can be managed (installed, upgraded...) with the `helm` CLI
.exercise[
- Deploy Tiller:
- Deploy `tiller`:
```bash
helm init
```
- Add the `helm` completion:
```bash
. <(helm completion $(basename $SHELL))
```
]
If Tiller was already installed, don't worry: this won't break it.
At the end of the install process, you will see:
```
Happy Helming!
```
---
## Fix account permissions

@@ -6,7 +6,7 @@
- If we want to connect to our pod(s), we need to create a *service*
- Once a service is created, CoreDNS will allow us to resolve it by name
- Once a service is created, `kube-dns` will allow us to resolve it by name
(i.e. after creating service `hello`, the name `hello` will resolve to something)
@@ -46,7 +46,7 @@ Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables`
- `ExternalName`
- the DNS entry managed by CoreDNS will just be a `CNAME` to a provided record
- the DNS entry managed by `kube-dns` will just be a `CNAME` to a provided record
- no port, no IP address, no nothing else is allocated
The `LoadBalancer` type is currently only available on AWS, Azure, and GCE.
@@ -69,10 +69,7 @@ The `LoadBalancer` type is currently only available on AWS, Azure, and GCE.
kubectl get pods -w
```
<!--
```wait elastic-```
```keys ^C```
-->
<!-- ```keys ^C``` -->
]
@@ -131,11 +128,6 @@ Note: please DO NOT call the service `search`. It would collide with the TLD.
IP=$(kubectl get svc elastic -o go-template --template '{{ .spec.clusterIP }}')
```
<!--
```hide kubectl wait deploy elastic --for condition=available```
```hide sleep 5``` (give some time for elasticsearch to start... hopefully this is enough!)
-->
- Send a few requests:
```bash
curl http://$IP:9200/
@@ -187,7 +179,7 @@ class: extra-details
- Since there is no virtual IP address, there is no load balancer either
- CoreDNS will return the pods' IP addresses as multiple `A` records
- `kube-dns` will return the pods' IP addresses as multiple `A` records
- This gives us an easy way to discover all the replicas for a deployment
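A quick way to observe this (a sketch; `elastic-headless` is a hypothetical headless service created alongside the existing one):
```bash
# Expose the elastic deployment without a virtual IP (headless service):
kubectl expose deployment elastic --port=9200 --name=elastic-headless --cluster-ip=None

# A DNS lookup from any pod now returns one A record per backing pod:
kubectl run dnstest --rm -it --restart=Never --image=alpine -- nslookup elastic-headless
```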

@@ -1,3 +1,5 @@
class: extra-details
# First contact with `kubectl`
- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes
@@ -79,13 +81,13 @@
---
class: extra-details
## What's available?
- `kubectl` has pretty good introspection facilities
- We can list all available resource types by running `kubectl api-resources`
<br/>
(In Kubernetes 1.10 and prior, this command used to be `kubectl get`)
- We can list all available resource types by running `kubectl get`
- We can view details about a resource with:
```bash
@@ -226,7 +228,7 @@ The `kube-system` namespace is used for the control plane.
- `kube-controller-manager` and `kube-scheduler` are other master components
- `coredns` provides DNS-based service discovery ([replacing kube-dns as of 1.11](https://kubernetes.io/blog/2018/07/10/coredns-ga-for-kubernetes-cluster-dns/))
- `kube-dns` is an additional component (not mandatory but super useful, so it's there)
- `kube-proxy` is the (per-node) component managing port mappings and such

slides/kube/kubectlproxy.md
@@ -0,0 +1,117 @@
# Accessing internal services with `kubectl proxy`
- `kubectl proxy` runs a proxy in the foreground
- This proxy lets us access the Kubernetes API without authentication
(`kubectl proxy` adds our credentials on the fly to the requests)
- This proxy lets us access the Kubernetes API over plain HTTP
- This is a great tool to learn and experiment with the Kubernetes API
- The Kubernetes API also gives us a proxy to HTTP and HTTPS services
- Therefore, we can use `kubectl proxy` to access internal services
(Without using a `NodePort` or similar service)
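For example (a minimal sketch, run wherever `kubectl` is already configured):
```bash
# Start the proxy (listens on 127.0.0.1:8001 by default):
kubectl proxy &

# The API is now reachable over plain HTTP, no credentials needed:
curl http://127.0.0.1:8001/version
```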
---
## Secure by default
- By default, the proxy listens on port 8001
(But this can be changed, or we can tell `kubectl proxy` to pick a port)
- By default, the proxy binds to `127.0.0.1`
(Making it unreachable from other machines, for security reasons)
- By default, the proxy only accepts connections from:
`^localhost$,^127\.0\.0\.1$,^\[::1\]$`
- This is great when running `kubectl proxy` locally
- Not-so-great when running it on a remote machine
---
## Running `kubectl proxy` on a remote machine
- We are going to bind to `INADDR_ANY` instead of `127.0.0.1`
- We are going to accept connections from any address
.exercise[
- Run an open proxy to the Kubernetes API:
```bash
kubectl proxy --port=8888 --address=0.0.0.0 --accept-hosts=.*
```
]
.warning[Anyone can now do whatever they want with our Kubernetes cluster!
<br/>
(Don't do this on a real cluster!)]
---
## Viewing available API routes
- The default route (i.e. `/`) shows a list of available API endpoints
.exercise[
- Point your browser to the IP address of the node running `kubectl proxy`, port 8888
]
The result should look like this:
```json
{
"paths": [
"/api",
"/api/v1",
"/apis",
"/apis/",
"/apis/admissionregistration.k8s.io",
```
---
## Connecting to a service through the proxy
- The API can proxy HTTP and HTTPS requests by accessing a special route:
```
/api/v1/namespaces/`name_of_namespace`/services/`name_of_service`/proxy
```
- Since we now have access to the API, we can use this special route
.exercise[
- Access the `hasher` service through the special proxy route:
```open
http://`X.X.X.X`:8888/api/v1/namespaces/default/services/hasher/proxy
```
]
You should see the banner of the hasher service: `HASHER running on ...`
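The same check can be done from the command line (replace `X.X.X.X` with the address of the node running `kubectl proxy`):
```bash
curl http://X.X.X.X:8888/api/v1/namespaces/default/services/hasher/proxy/
```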
---
## Stopping the proxy
- Remember: as it is running right now, `kubectl proxy` gives open access to our cluster
.exercise[
- Stop the `kubectl proxy` process with Ctrl-C
]

@@ -26,8 +26,6 @@
kubectl run pingpong --image alpine ping 1.1.1.1
```
<!-- ```hide kubectl wait deploy/pingpong --for condition=available``` -->
]
--
@@ -83,6 +81,8 @@ Note: as of 1.10.1, resource types are displayed in more detail.
---
class: extra-details
## Our `pingpong` deployment
- `kubectl run` created a *deployment*, `deployment.apps/pingpong`
@@ -135,6 +135,8 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
---
class: extra-details
## Streaming logs in real time
- Just like `docker logs`, `kubectl logs` supports convenient options:
@@ -198,13 +200,10 @@ We could! But the *deployment* would notice it right away, and scale back to the
<!--
```wait Running```
```keys ^C```
```hide kubectl wait deploy pingpong --for condition=available```
```keys kubectl delete pod ping```
```copypaste pong-..........-.....```
-->
- Destroy a pod:
```
```bash
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
```
]
@@ -228,6 +227,8 @@ We could! But the *deployment* would notice it right away, and scale back to the
---
class: extra-details
## Viewing logs of multiple pods
- When we specify a deployment name, only one single pod's logs are shown
@@ -251,6 +252,8 @@ Unfortunately, `--follow` cannot (yet) be used to stream the logs from multiple
---
class: extra-details
## Aren't we flooding 1.1.1.1?
- If you're wondering this, good question!

@@ -10,12 +10,7 @@
kubectl get deployments -w
```
<!--
```wait RESTARTS```
```keys ^C```
```wait AVAILABLE```
```keys ^C```
-->
<!-- ```keys ^C``` -->
- Now, create more `worker` replicas:
```bash
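# (hypothetical example of scaling up the worker deployment)
kubectl scale deploy/worker --replicas=10
```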

@@ -6,7 +6,7 @@
- [Play With Kubernetes Hands-On Labs](https://medium.com/@marcosnils/introducing-pwk-play-with-k8s-159fcfeb787b)
- [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/)
- [Azure Container Service](https://docs.microsoft.com/azure/aks/)
- [Cloud Developer Advocates](https://developer.microsoft.com/advocates/)

slides/kube/links.md
@@ -0,0 +1,11 @@
# Links and resources
- [Kubernetes Community](https://kubernetes.io/community/) - Slack, Google Groups, meetups
- [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/)
- [Cloud Developer Advocates](https://developer.microsoft.com/advocates/)
- [Local meetups](https://www.meetup.com/)
.footnote[These slides (and future updates) are on → http://container.training/]

@@ -40,12 +40,12 @@
- Load the YAML file into our cluster:
```bash
kubectl apply -f ~/container.training/k8s/efk.yaml
kubectl apply -f https://goo.gl/MUZhE4
```
]
If we [look at the YAML file](https://github.com/jpetazzo/container.training/blob/master/k8s/efk.yaml), we see that
If we [look at the YAML file](https://goo.gl/MUZhE4), we see that
it creates a daemon set, two deployments, two services,
and a few roles and role bindings (to give fluentd the required permissions).
@@ -113,7 +113,7 @@ and a few roles and role bindings (to give fluentd the required permissions).
- The first time you connect to Kibana, you must "configure an index pattern"
- Just use the one that is suggested, `@timestamp`.red[*]
- Just use the one that is suggested, `@timestamp`
- Then click "Discover" (in the top-left corner)
@@ -123,9 +123,6 @@ and a few roles and role bindings (to give fluentd the required permissions).
`kubernetes.host`, `kubernetes.pod_name`, `stream`, `log`
.red[*]If you don't see `@timestamp`, it's probably because no logs exist yet.
<br/>Wait a bit, and double-check the logging pipeline!
---
## Caveat emptor

@@ -47,25 +47,23 @@ Exactly what we need!
## Installing Stern
- Run `stern` (without arguments) to check if it's installed:
- For simplicity, let's just grab a binary release
```
$ stern
Tail multiple pods and containers from Kubernetes
.exercise[
Usage:
stern pod-query [flags]
```
- If it is not installed, the easiest method is to download a [binary release](https://github.com/wercker/stern/releases)
- The following commands will install Stern on a Linux Intel 64 bits machine:
- Download a binary release from GitHub:
```bash
sudo curl -L -o /usr/local/bin/stern \
https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
https://github.com/wercker/stern/releases/download/1.6.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
```
]
These installation instructions will work on our clusters, since they are Linux amd64 VMs.
However, you will have to adapt them if you want to install Stern on your local machine.
---
## Using Stern

@@ -151,7 +151,7 @@ Note: it might take a minute or two for the app to be up and running.
- A pod in the `default` namespace can communicate with a pod in the `kube-system` namespace
- CoreDNS uses a different subdomain for each namespace
- `kube-dns` uses a different subdomain for each namespace
- Example: from any pod in the cluster, you can connect to the Kubernetes API with:
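(A sketch, using the standard `kubernetes` service in the `default` namespace; `-k` skips certificate verification.)
```bash
# From inside any pod in the cluster:
curl -k https://kubernetes.default.svc.cluster.local
```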

@@ -4,6 +4,8 @@ Our app on Kube
---
class: extra-details
## What's on the menu?
In this part, we will:
@@ -114,8 +116,6 @@ In this part, we will:
.exercise[
<!-- ```hide kubectl wait deploy/registry --for condition=available```-->
- View the repositories currently held in our registry:
```bash
curl $REGISTRY/v2/_catalog
@@ -132,6 +132,8 @@ We should see:
---
class: extra-details
## Testing our local registry
- We can retag a small image, and push it to the registry
@@ -153,6 +155,8 @@ We should see:
---
class: extra-details
## Checking again what's on our local registry
- Let's use the same endpoint as before
@@ -277,11 +281,6 @@ class: extra-details
.exercise[
<!-- ```hide
kubectl wait deploy/rng --for condition=available
kubectl wait deploy/worker --for condition=available
``` -->
- Look at some logs:
```bash
kubectl logs deploy/rng
@@ -335,8 +334,9 @@ kubectl wait deploy/worker --for condition=available
(Give it about 10 seconds to recover)
<!--
```wait units of work done, updating hash counter```
```keys ^C```
```keys
^C
```
-->
]

@@ -96,10 +96,7 @@
kubectl get deployments -w
```
<!--
```wait NAME```
```keys ^C```
-->
<!-- ```keys ^C``` -->
- Update `worker` either with `kubectl edit`, or by running:
```bash
@@ -153,38 +150,23 @@ That rollout should be pretty quick. What shows in the web UI?
kubectl rollout status deploy worker
```
<!--
```wait Waiting for deployment```
```keys ^C```
-->
]
--
Our rollout is stuck. However, the app is not dead.
(After a minute, it will stabilize to be 20-25% slower.)
Our rollout is stuck. However, the app is not dead (just 10% slower).
---
## What's going on with our rollout?
- Why is our app a bit slower?
- Why is our app 10% slower?
- Because `MaxUnavailable=25%`
- Because `MaxUnavailable=1`, so the rollout terminated 1 replica out of 10 available
... So the rollout terminated 2 replicas out of 10 available
- Okay, but why do we see 2 new replicas being rolled out?
- Okay, but why do we see 5 new replicas being rolled out?
- Because `MaxSurge=25%`
... So in addition to replacing 2 replicas, the rollout is also starting 3 more
- It rounded down the number of MaxUnavailable pods conservatively,
<br/>
but the total number of pods being rolled out is allowed to be 25+25=50%
- Because `MaxSurge=1`, so in addition to replacing the terminated one, the rollout is also starting one more
---
@@ -194,15 +176,15 @@ class: extra-details
- We start with 10 pods running for the `worker` deployment
- Current settings: MaxUnavailable=25% and MaxSurge=25%
- Current settings: MaxUnavailable=1 and MaxSurge=1
- When we start the rollout:
- two replicas are taken down (as per MaxUnavailable=25%)
- two others are created (with the new version) to replace them
- three others are created (with the new version, as per MaxSurge=25%)
- one replica is taken down (as per MaxUnavailable=1)
- another is created (with the new version) to replace it
- another is created (with the new version) per MaxSurge=1
- Now we have 8 replicas up and running, and 5 being deployed
- Now we have 9 replicas up and running, and 2 being deployed
- Our rollout is stuck at this point!
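The values actually in effect can be checked directly on the deployment (a quick sketch):
```bash
kubectl get deploy worker -o jsonpath='{.spec.strategy.rollingUpdate}{"\n"}'
```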
@@ -210,7 +192,6 @@ class: extra-details
## Checking the dashboard during the bad rollout
If you haven't deployed the Kubernetes dashboard earlier, just skip this slide.
.exercise[
@@ -269,7 +250,7 @@ Note the `3xxxx` port.
- revert to `v0.1`
- be conservative on availability (always have desired number of available workers)
- go slow on rollout speed (update only one pod at a time)
- be aggressive on rollout speed (update more than one pod at a time)
- give some time to our workers to "warm up" before starting more
The corresponding changes can be expressed in the following YAML snippet:
@@ -285,7 +266,7 @@ spec:
strategy:
rollingUpdate:
maxUnavailable: 0
maxSurge: 1
maxSurge: 3
minReadySeconds: 10
```
]
@@ -314,7 +295,7 @@ spec:
strategy:
rollingUpdate:
maxUnavailable: 0
maxSurge: 1
maxSurge: 3
minReadySeconds: 10
"
kubectl rollout status deployment worker

@@ -10,7 +10,7 @@
2. Install Kubernetes packages
3. Run `kubeadm init` on the first node (it deploys the control plane on that node)
3. Run `kubeadm init` on the master node
4. Set up Weave (the overlay network)
<br/>
@@ -24,23 +24,6 @@
---
## `kubeadm` drawbacks
- Doesn't set up Docker or any other container engine
- Doesn't set up the overlay network
- Doesn't set up multi-master (no high availability)
--
(At least ... not yet!)
--
- "It's still twice as many steps as setting up a Swarm cluster 😕" -- Jérôme
---
## Other deployment options

@@ -1,45 +1,3 @@
# Next steps
*Alright, how do I get started and containerize my apps?*
--
Suggested containerization checklist:
.checklist[
- write a Dockerfile for one service in one app
- write Dockerfiles for the other (buildable) services
- write a Compose file for that whole app
- make sure that devs are empowered to run the app in containers
- set up automated builds of container images from the code repo
- set up a CI pipeline using these container images
- set up a CD pipeline (for staging/QA) using these images
]
And *then* it is time to look at orchestration!
---
## Namespaces
- Namespaces let you run multiple identical stacks side by side
- Two namespaces (e.g. `blue` and `green`) can each have their own `redis` service
- Each of the two `redis` services has its own `ClusterIP`
- CoreDNS creates two entries, mapping to these two `ClusterIP` addresses:
`redis.blue.svc.cluster.local` and `redis.green.svc.cluster.local`
- Pods in the `blue` namespace get a *search suffix* of `blue.svc.cluster.local`
- As a result, resolving `redis` from a pod in the `blue` namespace yields the "local" `redis`
.warning[This does not provide *isolation*! That would be the job of network policies.]
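A minimal sketch of the `blue`/`green` example above:
```bash
# Two namespaces, each with its own redis deployment and service:
kubectl create namespace blue
kubectl create namespace green
for NS in blue green; do
  kubectl -n $NS run redis --image=redis
  kubectl -n $NS expose deployment redis --port=6379
done

# From a pod in the blue namespace, "redis" resolves to redis.blue.svc.cluster.local
```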
---
## Stateful services (databases etc.)
- As a first step, it is wiser to keep stateful services *outside* of the cluster
@@ -93,37 +51,13 @@ And *then* it is time to look at orchestration!
---
## Logging
## Logging and metrics
- Logging is delegated to the container engine
- Logs are exposed through the API
- Logs are also accessible through local files (`/var/log/containers`)
- Log shipping to a central platform is usually done through these files
(e.g. with an agent bind-mounting the log directory)
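For instance (a sketch; the deployment name is one from this workshop):
```bash
# Through the API:
kubectl logs deploy/worker --tail=10 --timestamps

# On a node, the same logs are visible as plain files:
ls -l /var/log/containers/
```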
---
## Metrics
- The kubelet embeds [cAdvisor](https://github.com/google/cadvisor), which exposes container metrics
(cAdvisor might be separated in the future for more flexibility)
- It is a good idea to start with [Prometheus](https://prometheus.io/)
(even if you end up using something else)
- Starting from Kubernetes 1.8, we can use the [Metrics API](https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/)
- [Heapster](https://github.com/kubernetes/heapster) was a popular add-on
(but is being [deprecated](https://github.com/kubernetes/heapster/blob/master/docs/deprecation.md) starting with Kubernetes 1.11)
- Metrics are typically handled with [Prometheus](https://prometheus.io/)
([Heapster](https://github.com/kubernetes/heapster) is a popular add-on)
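Once a metrics pipeline is in place (Heapster back then, metrics-server nowadays), the Metrics API can be queried directly (a sketch):
```bash
kubectl top nodes
kubectl top pods
```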
---
@@ -196,17 +130,3 @@ Sorry Star Trek fans, this is not the federation you're looking for!
- Synchronize resources across clusters
- Discover resources across clusters
---
## Developer experience
*I've put this last, but it's pretty important!*
- How do you on-board a new developer?
- What do they need to install to get a dev stack?
- How does a code change make it from dev to prod?
- How does someone add a component to a stack?

Some files were not shown because too many files have changed in this diff.