Compare commits


5 Commits

Author | SHA1 | Message | Date
Jerome Petazzoni | 56f67aedb3 | fix-redirects.sh: adding forced redirect | 2020-04-07 16:56:32 -05:00
Jerome Petazzoni | cb47280632 | Merge branch 'master' into oscon2018 | 2018-07-17 10:55:25 -05:00
Bridget Kromhout | 05588a86d9 | Merge pull request #312 from jpetazzo/master (updates from master) | 2018-07-16 19:02:47 -05:00
Bridget Kromhout | 59ffe6b6c8 | Merge pull request #307 from bridgetkromhout/oscon2018 (Adding oscon-specific details) | 2018-07-15 13:18:48 -05:00
Bridget Kromhout | 54f1300305 | Adding oscon-specific details | 2018-07-15 13:16:27 -05:00
105 changed files with 421 additions and 1491 deletions

View File

@@ -1,222 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    k8s-app: fluentd-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:elasticsearch
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        # X-Pack Authentication
        # =====================
        - name: FLUENT_ELASTICSEARCH_USER
          value: "elastic"
        - name: FLUENT_ELASTICSEARCH_PASSWORD
          value: "changeme"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: elasticsearch
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5.6.8
        imagePullPolicy: IfNotPresent
        name: elasticsearch
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    run: elasticsearch
  sessionAffinity: None
  type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: kibana
  name: kibana
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: kibana
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5.6.8
        imagePullPolicy: Always
        name: kibana
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: kibana
  name: kibana
  selfLink: /api/v1/namespaces/default/services/kibana
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    run: kibana
  sessionAffinity: None
  type: NodePort
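
The manifest above wires together the whole logging pipeline (fluentd DaemonSet, Elasticsearch, Kibana). As a rough sanity check, assuming it is saved locally as `efk.yaml` (the filename is an assumption, not part of the repo), something like this should show all the pieces coming up:

```bash
# Apply the stack and check each piece (assumes a file named efk.yaml).
kubectl apply -f efk.yaml
kubectl get daemonset fluentd                  # one fluentd pod per node
kubectl get deployments elasticsearch kibana   # both deployments should become ready
kubectl get services elasticsearch kibana
# Once Elasticsearch is up, its ClusterIP should answer on port 9200:
ES_IP=$(kubectl get svc elasticsearch -o jsonpath='{.spec.clusterIP}')
curl "$ES_IP:9200"
```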

View File

@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
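
This binding grants the dashboard's service account full `cluster-admin` rights (convenient for a workshop, far too broad for production). If token login is preferred over skipping authentication, the account's token can be read back along these lines; this is a sketch that relies on the automatically created token secret, which pre-1.24 clusters attach to the service account:

```bash
# Look up the auto-generated token secret of the dashboard service account
# and decode its token (assumes a cluster that still auto-creates these secrets).
SECRET=$(kubectl -n kube-system get serviceaccount kubernetes-dashboard \
           -o jsonpath='{.secrets[0].name}')
kubectl -n kube-system get secret "$SECRET" -o jsonpath='{.data.token}' | base64 -d; echo
```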

View File

@@ -1,167 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
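
The Service above is a plain `ClusterIP` on port 443, so the dashboard is not reachable from outside the cluster as-is. Besides the NodePort/socat tricks used later in these materials, one hedged alternative is to go through `kubectl proxy`, using the service-proxy URL form documented for the 1.8-era dashboard (the `https:` prefix tells the API server to proxy over TLS):

```bash
kubectl proxy &
# Dashboard UI via the API server's service proxy (reachable from localhost only by default):
curl http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
kill %1
```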

View File

@@ -1,14 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl

View File

@@ -1,10 +0,0 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress: []
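
Once this policy is applied, any connection to the `testweb` pod should hang, since the empty `ingress` list matches nothing. A minimal verification sketch, assuming the pod was created with `kubectl run testweb --image=nginx` as in the accompanying slides:

```bash
kubectl apply -f netpol-deny-all-for-testweb.yaml
kubectl get networkpolicies
kubectl describe networkpolicy deny-all-for-testweb
IP=$(kubectl get pods -l run=testweb -o jsonpath='{.items[0].status.podIP}')
curl --max-time 3 "$IP"   # expected to time out once the policy is in place
```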

View File

@@ -1,67 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      run: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: socat
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
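
This socat Deployment re-exposes the dashboard's HTTPS endpoint as plain HTTP on a NodePort, which is handy for a disposable training cluster but leaks credentials in clear text. Where a recent enough `kubectl` (1.10+) is available, a similar short-lived tunnel can be obtained without any extra manifest, keeping TLS end to end; this is a sketch, not what the training materials use:

```bash
# Forward local port 8443 to port 443 of the dashboard service in kube-system,
# then browse to https://localhost:8443/ (self-signed certificate).
kubectl -n kube-system port-forward service/kubernetes-dashboard 8443:443
```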

View File

@@ -93,7 +93,7 @@ wrap Run this program in a container
- The `./workshopctl` script can be executed directly.
- It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
- During `deploy` it will create the `docker` user with password `training`, which is printing on the cards for students. This can be configured with the `docker_user_password` property in the settings file.
- During `deploy` it will create the `docker` user with password `training`, which is printing on the cards for students. For now, this is hard coded.
### Example Steps to Launch a Batch of AWS Instances for a Workshop

View File

@@ -85,7 +85,7 @@ img {
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
<tr><td class="logpass">training</td></tr>
</table>
</p>

View File

@@ -168,22 +168,6 @@ _cmd_kube() {
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
fi"
# Install stern
pssh "
if [ ! -x /usr/local/bin/stern ]; then
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
sep "Done"
}

View File

@@ -13,7 +13,6 @@ COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]
#################################
@@ -55,9 +54,9 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))
ipv4 = open("/tmp/ipv4").read()
# Add a "docker" user with password coming from the settings
# Add a "docker" user with password "training"
system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))
system("echo docker:training | sudo chpasswd")
# Fancy prompt courtesy of @soulshake.
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL

View File

@@ -22,6 +22,3 @@ engine_version: test
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.18.0
machine_version: 0.13.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -85,7 +85,7 @@ img {
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
<tr><td class="logpass">training</td></tr>
</table>
</p>

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

View File

@@ -22,6 +22,3 @@ engine_version: stable
# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0
# Password used to connect with the "docker user"
docker_user_password: training

slides/_redirects (new file, 1 line)
View File

@@ -0,0 +1 @@
/ /kube-halfday.yml.html 200!

View File

@@ -29,10 +29,6 @@ class State(object):
self.interactive = True
self.verify_status = False
self.simulate_type = True
self.switch_desktop = False
self.sync_slides = False
self.open_links = False
self.run_hidden = True
self.slide = 1
self.snippet = 0
@@ -41,10 +37,6 @@ class State(object):
self.interactive = bool(data["interactive"])
self.verify_status = bool(data["verify_status"])
self.simulate_type = bool(data["simulate_type"])
self.switch_desktop = bool(data["switch_desktop"])
self.sync_slides = bool(data["sync_slides"])
self.open_links = bool(data["open_links"])
self.run_hidden = bool(data["run_hidden"])
self.slide = int(data["slide"])
self.snippet = int(data["snippet"])
@@ -54,10 +46,6 @@ class State(object):
interactive=self.interactive,
verify_status=self.verify_status,
simulate_type=self.simulate_type,
switch_desktop=self.switch_desktop,
sync_slides=self.sync_slides,
open_links=self.open_links,
run_hidden=self.run_hidden,
slide=self.slide,
snippet=self.snippet,
), f, default_flow_style=False)
@@ -134,20 +122,14 @@ class Slide(object):
def focus_slides():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "3"])
subprocess.check_output(["i3-msg", "workspace", "1"])
def focus_terminal():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "2"])
subprocess.check_output(["i3-msg", "workspace", "1"])
def focus_browser():
if not state.switch_desktop:
return
subprocess.check_output(["i3-msg", "workspace", "4"])
subprocess.check_output(["i3-msg", "workspace", "1"])
@@ -325,21 +307,17 @@ while True:
slide = slides[state.slide]
snippet = slide.snippets[state.snippet-1] if state.snippet else None
click.clear()
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}]"
.format(state.slide, len(slides)-1,
state.snippet, len(slide.snippets) if slide.snippets else 0,
state.simulate_type, state.verify_status,
state.switch_desktop, state.sync_slides,
state.open_links, state.run_hidden))
state.simulate_type, state.verify_status))
print(hrule())
if snippet:
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
focus_terminal()
else:
print(slide.content)
if state.sync_slides:
subprocess.check_output(["./gotoslide.js", str(slide.number)])
subprocess.check_output(["./gotoslide.js", str(slide.number)])
focus_slides()
print(hrule())
if state.interactive:
@@ -348,10 +326,6 @@ while True:
print("n/→ Next")
print("s Simulate keystrokes")
print("v Validate exit status")
print("d Switch desktop")
print("k Sync slides")
print("o Open links")
print("h Run hidden commands")
print("g Go to a specific slide")
print("q Quit")
print("c Continue non-interactively until next error")
@@ -367,14 +341,6 @@ while True:
state.simulate_type = not state.simulate_type
elif command == "v":
state.verify_status = not state.verify_status
elif command == "d":
state.switch_desktop = not state.switch_desktop
elif command == "k":
state.sync_slides = not state.sync_slides
elif command == "o":
state.open_links = not state.open_links
elif command == "h":
state.run_hidden = not state.run_hidden
elif command == "g":
state.slide = click.prompt("Enter slide number", type=int)
state.snippet = 0
@@ -400,7 +366,7 @@ while True:
logging.info("Running with method {}: {}".format(method, data))
if method == "keys":
send_keys(data)
elif method == "bash" or (method == "hide" and state.run_hidden):
elif method == "bash":
# Make sure that we're ready
wait_for_prompt()
# Strip leading spaces
@@ -439,12 +405,11 @@ while True:
screen = capture_pane()
url = data.replace("/node1", "/{}".format(IPADDR))
# This should probably be adapted to run on different OS
if state.open_links:
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
subprocess.check_output(["xdg-open", url])
focus_browser()
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
else:
logging.warning("Unknown method {}: {!r}".format(method, data))
move_forward()

View File

@@ -1 +0,0 @@
click

View File

@@ -189,9 +189,7 @@ done
```bash
if which kubectl; then
kubectl get deploy,ds -o name | xargs -rn1 kubectl delete
kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete --ignore-not-found=true
kubectl -n kube-system get deploy,svc -o name | grep -v dns | xargs -rn1 kubectl -n kube-system delete
kubectl get all -o name | grep -v service/kubernetes | xargs -rn1 kubectl delete
fi
```
-->
@@ -214,7 +212,7 @@ If anything goes wrong — ask for help!
- Use something like
[Play-With-Docker](http://play-with-docker.com/) or
[Play-With-Kubernetes](https://training.play-with-kubernetes.com/)
[Play-With-Kubernetes](https://medium.com/@marcosnils/introducing-pwk-play-with-k8s-159fcfeb787b)
Zero setup effort; but environment are short-lived and
might have limited resources

View File

@@ -8,9 +8,8 @@
<!--
```bash
cd ~
if [ -d container.training ]; then
mv container.training container.training.$RANDOM
mv container.training container.training.$$
fi
```
-->

View File

@@ -113,13 +113,7 @@ for item in items:
1: "st", 2: "nd", 3: "rd",
21: "st", 22: "nd", 23: "rd",
31: "st"}.get(date.day, "th")
# %e is a non-standard extension (it displays the day, but without a
# leading zero). If strftime fails with ValueError, try to fall back
# on %d (which displays the day but with a leading zero when needed).
try:
item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
except ValueError:
item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)
item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
today = datetime.date.today()
coming_soon = [i for i in items if i.get("date") and i["date"] >= today]

View File

@@ -1,43 +1,3 @@
- date: 2018-11-23
  city: Copenhagen
  country: dk
  event: GOTO
  title: Build Container Orchestration with Docker Swarm
  speaker: bretfisher
  attend: https://gotocph.com/2018/workshops/121
- date: 2018-11-08
  city: San Francisco, CA
  country: us
  event: QCON
  title: Introduction to Docker and Containers
  speaker: jpetazzo
  attend: https://qconsf.com/sf2018/workshop/introduction-docker-and-containers
- date: 2018-11-09
  city: San Francisco, CA
  country: us
  event: QCON
  title: Getting Started With Kubernetes and Container Orchestration
  speaker: jpetazzo
  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration
- date: 2018-10-31
  city: London, UK
  country: uk
  event: Velocity EU
  title: Kubernetes 101
  speaker: bridgetkromhout
  attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71149
- date: 2018-10-30
  city: London, UK
  country: uk
  event: Velocity EU
  title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
  speaker: bretfisher
  attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71231
- date: 2018-07-12
  city: Minneapolis, MN
  country: us
@@ -63,14 +23,6 @@
  speaker: jpetazzo
  attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/69875
- date: 2018-09-30
  city: New York, NY
  country: us
  event: Velocity
  title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
  speaker: bretfisher
  attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/70147
- date: 2018-09-17
  country: fr
  city: Paris

View File

@@ -13,47 +13,47 @@ exclude:
- self-paced
chapters:
- shared/title.md
- common/title.md
- logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- intro/Docker_Machine.md
- - intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- common/thankyou.md
- intro/links.md

View File

@@ -13,47 +13,47 @@ exclude:
- in-person
chapters:
- shared/title.md
# - shared/logistics.md
- containers/intro.md
- shared/about-slides.md
- shared/toc.md
- - containers/Docker_Overview.md
- containers/Docker_History.md
- containers/Training_Environment.md
- containers/Installing_Docker.md
- containers/First_Containers.md
- containers/Background_Containers.md
- containers/Start_And_Attach.md
- - containers/Initial_Images.md
- containers/Building_Images_Interactively.md
- containers/Building_Images_With_Dockerfiles.md
- containers/Cmd_And_Entrypoint.md
- containers/Copying_Files_During_Build.md
- - containers/Multi_Stage_Builds.md
- containers/Publishing_To_Docker_Hub.md
- containers/Dockerfile_Tips.md
- - containers/Naming_And_Inspecting.md
- containers/Labels.md
- containers/Getting_Inside.md
- - containers/Container_Networking_Basics.md
- containers/Network_Drivers.md
- containers/Container_Network_Model.md
#- containers/Connecting_Containers_With_Links.md
- containers/Ambassadors.md
- - containers/Local_Development_Workflow.md
- containers/Working_With_Volumes.md
- containers/Compose_For_Dev_Stacks.md
- containers/Docker_Machine.md
- - containers/Advanced_Dockerfiles.md
- containers/Application_Configuration.md
- containers/Logging.md
- containers/Resource_Limits.md
- - containers/Namespaces_Cgroups.md
- containers/Copy_On_Write.md
#- containers/Containers_From_Scratch.md
- - containers/Container_Engines.md
- containers/Ecosystem.md
- containers/Orchestration_Overview.md
- shared/thankyou.md
- containers/links.md
- common/title.md
# - common/logistics.md
- intro/intro.md
- common/about-slides.md
- common/toc.md
- - intro/Docker_Overview.md
- intro/Docker_History.md
- intro/Training_Environment.md
- intro/Installing_Docker.md
- intro/First_Containers.md
- intro/Background_Containers.md
- intro/Start_And_Attach.md
- - intro/Initial_Images.md
- intro/Building_Images_Interactively.md
- intro/Building_Images_With_Dockerfiles.md
- intro/Cmd_And_Entrypoint.md
- intro/Copying_Files_During_Build.md
- - intro/Multi_Stage_Builds.md
- intro/Publishing_To_Docker_Hub.md
- intro/Dockerfile_Tips.md
- - intro/Naming_And_Inspecting.md
- intro/Labels.md
- intro/Getting_Inside.md
- - intro/Container_Networking_Basics.md
- intro/Network_Drivers.md
- intro/Container_Network_Model.md
#- intro/Connecting_Containers_With_Links.md
- intro/Ambassadors.md
- - intro/Local_Development_Workflow.md
- intro/Working_With_Volumes.md
- intro/Compose_For_Dev_Stacks.md
- intro/Docker_Machine.md
- - intro/Advanced_Dockerfiles.md
- intro/Application_Configuration.md
- intro/Logging.md
- intro/Resource_Limits.md
- - intro/Namespaces_Cgroups.md
- intro/Copy_On_Write.md
#- intro/Containers_From_Scratch.md
- - intro/Container_Engines.md
- intro/Ecosystem.md
- intro/Orchestration_Overview.md
- common/thankyou.md
- intro/links.md

View File

@@ -312,7 +312,7 @@ CMD gunicorn --bind 0.0.0.0:5000 --workers 10 counter:app
EXPOSE 5000
```
(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
(Source: [traininghweels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
---

View File

@@ -1,179 +0,0 @@
# Accessing the API with `kubectl proxy`
- The API requires us to authenticate.red[¹]
- There are many authentication methods available, including:
- TLS client certificates
<br/>
(that's what we've used so far)
- HTTP basic password authentication
<br/>
(from a static file; not recommended)
- various token mechanisms
<br/>
(detailed in the [documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#authentication-strategies))
.red[¹]OK, we lied. If you don't authenticate, you are considered to
be user `system:anonymous`, which doesn't have any access rights by default.
---
## Accessing the API directly
- Let's see what happens if we try to access the API directly with `curl`
.exercise[
- Retrieve the ClusterIP allocated to the `kubernetes` service:
```bash
kubectl get svc kubernetes
```
- Replace the IP below and try to connect with `curl`:
```bash
curl -k https://`10.96.0.1`/
```
]
The API will tell us that user `system:anonymous` cannot access this path.
---
## Authenticating to the API
If we wanted to talk to the API, we would need to:
- extract our TLS key and certificate information from `~/.kube/config`
(the information is in PEM format, encoded in base64)
- use that information to present our certificate when connecting
(for instance, with `openssl s_client -key ... -cert ... -connect ...`)
- figure out exactly which credentials to use
(once we start juggling multiple clusters)
- change that whole process if we're using another authentication method
🤔 There has to be a better way!
---
## Using `kubectl proxy` for authentication
- `kubectl proxy` runs a proxy in the foreground
- This proxy lets us access the Kubernetes API without authentication
(`kubectl proxy` adds our credentials on the fly to the requests)
- This proxy lets us access the Kubernetes API over plain HTTP
- This is a great tool to learn and experiment with the Kubernetes API
- ... And for serious usages as well (suitable for one-shot scripts)
- For unattended use, it is better to create a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
---
## Trying `kubectl proxy`
- Let's start `kubectl proxy` and then do a simple request with `curl`!
.exercise[
- Start `kubectl proxy` in the background:
```bash
kubectl proxy &
```
- Access the API's default route:
```bash
curl localhost:8001
```
- Terminate the proxy:
```bash
kill %1
```
]
The output is a list of available API routes.
---
## `kubectl proxy` is intended for local use
- By default, the proxy listens on port 8001
(But this can be changed, or we can tell `kubectl proxy` to pick a port)
- By default, the proxy binds to `127.0.0.1`
(Making it unreachable from other machines, for security reasons)
- By default, the proxy only accepts connections from:
`^localhost$,^127\.0\.0\.1$,^\[::1\]$`
- This is great when running `kubectl proxy` locally
- Not-so-great when you want to connect to the proxy from a remote machine
---
## Running `kubectl proxy` on a remote machine
- If we wanted to connect to the proxy from another machine, we would need to:
- bind to `INADDR_ANY` instead of `127.0.0.1`
- accept connections from any address
- This is achieved with:
```
kubectl proxy --port=8888 --address=0.0.0.0 --accept-hosts=.*
```
.warning[Do not do this on a real cluster: it opens full unauthenticated access!]
---
## Security considerations
- Running `kubectl proxy` openly is a huge security risk
- It is slightly better to run the proxy where you need it
(and copy credentials, e.g. `~/.kube/config`, to that place)
- It is even better to use a limited account with reduced permissions
---
## Good to know ...
- `kubectl proxy` also gives access to all internal services
- Specifically, services are exposed as such:
```
/api/v1/namespaces/<namespace>/services/<service>/proxy
```
- We can use `kubectl proxy` to access an internal service in a pinch
(or, for non HTTP services, `kubectl port-forward`)
- This is not very useful when running `kubectl` directly on the cluster
(since we could connect to the services directly anyway)
- But it is very powerful as soon as you run `kubectl` from a remote machine

View File

@@ -1,268 +0,0 @@
# Network policies
- Namespaces help us to *organize* resources
- Namespaces do not provide isolation
- By default, every pod can contact every other pod
- By default, every service accepts traffic from anyone
- If we want this to be different, we need *network policies*
---
## What's a network policy?
A network policy is defined by the following things.
- A *pod selector* indicating which pods it applies to
e.g.: "all pods in namespace `blue` with the label `zone=internal`"
- A list of *ingress rules* indicating which inbound traffic is allowed
e.g.: "TCP connections to ports 8000 and 8080 coming from pods with label `zone=dmz`,
and from the external subnet 4.42.6.0/24, except 4.42.6.5"
- A list of *egress rules* indicating which outbound traffic is allowed
A network policy can provide ingress rules, egress rules, or both.
---
## How do network policies apply?
- A pod can be "selected" by any number of network policies
- If a pod isn't selected by any network policy, then its traffic is unrestricted
(In other words: in the absence of network policies, all traffic is allowed)
- If a pod is selected by at least one network policy, then all traffic is blocked ...
... unless it is explicitly allowed by one of these network policies
---
class: extra-details
## Traffic filtering is flow-oriented
- Network policies deal with *connections*, not individual packets
- Example: to allow HTTP (80/tcp) connections to pod A, you only need an ingress rule
(You do not need a matching egress rule to allow response traffic to go through)
- This also applies for UDP traffic
(Allowing DNS traffic can be done with a single rule)
- Network policy implementations use stateful connection tracking
---
## Pod-to-pod traffic
- Connections from pod A to pod B have to be allowed by both pods:
- pod A has to be unrestricted, or allow the connection as an *egress* rule
- pod B has to be unrestricted, or allow the connection as an *ingress* rule
- As a consequence: if a network policy restricts traffic going from/to a pod,
<br/>
the restriction cannot be overridden by a network policy selecting another pod
- This prevents an entity managing network policies in namespace A
(but without permission to do so in namespace B)
from adding network policies giving them access to namespace B
---
## The rationale for network policies
- In network security, it is generally considered better to "deny all, then allow selectively"
(The other approach, "allow all, then block selectively" makes it too easy to leave holes)
- As soon as one network policy selects a pod, the pod enters this "deny all" logic
- Further network policies can open additional access
- Good network policies should be scoped as precisely as possible
- In particular: make sure that the selector is not too broad
(Otherwise, you end up affecting pods that were otherwise well secured)
---
## Our first network policy
This is our game plan:
- run a web server in a pod
- create a network policy to block all access to the web server
- create another network policy to allow access only from specific pods
---
## Running our test web server
.exercise[
- Let's use the `nginx` image:
```bash
kubectl run testweb --image=nginx
```
- Find out the IP address of the pod with one of these two commands:
```bash
kubectl get pods -o wide -l run=testweb
IP=$(kubectl get pods -l run=testweb -o json | jq -r .items[0].status.podIP)
```
- Check that we can connect to the server:
```bash
curl $IP
```
]
The `curl` command should show us the "Welcome to nginx!" page.
---
## Adding a very restrictive network policy
- The policy will select pods with the label `run=testweb`
- It will specify an empty list of ingress rules (matching nothing)
.exercise[
- Apply the policy in this YAML file:
```bash
kubectl apply -f ~/container.training/k8s/netpol-deny-all-for-testweb.yaml
```
- Check if we can still access the server:
```bash
curl $IP
```
]
The `curl` command should now time out.
---
## Looking at the network policy
This is the file that we applied:
```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: deny-all-for-testweb
spec:
podSelector:
matchLabels:
run: testweb
ingress: []
```
---
## Allowing connections only from specific pods
- We want to allow traffic from pods with the label `run=testcurl`
- Reminder: this label is automatically applied when we do `kubectl run testcurl ...`
.exercise[
- Apply another policy:
```bash
kubectl apply -f ~/container.training/netpol-allow-testcurl-for-testweb.yaml
```
]
---
## Looking at the network policy
This is the second file that we applied:
```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: allow-testcurl-for-testweb
spec:
podSelector:
matchLabels:
run: testweb
ingress:
- from:
- podSelector:
matchLabels:
run: testcurl
```
---
## Testing the network policy
- Let's create pods with, and without, the required label
.exercise[
- Try to connect to testweb from a pod with the `run=testcurl` label:
```bash
kubectl run testcurl --rm -i --image=centos -- curl -m3 $IP
```
- Try to connect to testweb with a different label:
```bash
kubectl run testkurl --rm -i --image=centos -- curl -m3 $IP
```
]
The first command will work (and show the "Welcome to nginx!" page).
The second command will fail and time out after 3 seconds.
(The timeout is obtained with the `-m3` option.)
---
## An important warning
- Some network plugins only have partial support for network policies
- For instance, Weave [doesn't support ipBlock (yet)](https://github.com/weaveworks/weave/issues/3168)
- Weave added support for egress rules [in version 2.4](https://github.com/weaveworks/weave/pull/3313) (released in July 2018)
- Unsupported features might be silently ignored
(Making you believe that you are secure, when you're not)
---
## Further resources
- As always, the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) is a good starting point
- And two resources by [Ahmet Alp Balkan](https://ahmet.im/):
- a [very good talk about network policies](https://www.youtube.com/watch?list=PLj6h78yzYM2P-3-xqvmWaZbbI1sW-ulZb&v=3gGpMmYeEO8) at KubeCon North America 2017
- a repository of [ready-to-use recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) for network policies

View File

@@ -14,35 +14,34 @@ exclude:
- self-paced
chapters:
- shared/title.md
- common/title.md
- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
#- shared/composescale.md
- shared/composedown.md
- - k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- - k8s/kubectlexpose.md
- k8s/ourapponkube.md
- k8s/kubectlproxy.md
- k8s/dashboard.md
- - k8s/kubectlscale.md
- k8s/daemonset.md
- k8s/rollout.md
- k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/helm.md
- k8s/namespaces.md
- k8s/netpol.md
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md
- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
#- common/composescale.md
- common/composedown.md
- - kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- kube/kubectlrun.md
- - kube/kubectlexpose.md
- kube/ourapponkube.md
- kube/kubectlproxy.md
- kube/dashboard.md
- - kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
#- kube/logs-cli.md
#- kube/logs-centralized.md
#- kube/helm.md
#- kube/namespaces.md
- kube/whatsnext.md
- kube/links.md
- common/thankyou.md

View File

@@ -2,8 +2,8 @@ title: |
Kubernetes 101
#chat: "[Slack](https://dockercommunity.slack.com/messages/C7GKACWDV)"
#chat: "[Gitter](https://gitter.im/jpetazzo/training-20180413-paris)"
chat: "In person!"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-20180717-portland)"
#chat: "In person!"
gitrepo: github.com/jpetazzo/container.training
@@ -13,41 +13,40 @@ exclude:
- self-paced
chapters:
- shared/title.md
- common/title.md
#- logistics.md
# Bridget-specific; others use logistics.md
- logistics-bridget.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
# Bridget doesn't go into as much depth with compose
#- shared/composescale.md
- shared/composedown.md
- k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- - k8s/kubectlrun.md
- k8s/kubectlexpose.md
- k8s/ourapponkube.md
#- k8s/kubectlproxy.md
- - k8s/dashboard.md
- k8s/kubectlscale.md
- k8s/daemonset.md
- k8s/rollout.md
- - k8s/logs-cli.md
#- common/composescale.md
- common/composedown.md
- kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- - kube/kubectlrun.md
- kube/kubectlexpose.md
- kube/ourapponkube.md
#- kube/kubectlproxy.md
- - kube/dashboard.md
- kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
- - kube/logs-cli.md
# Bridget hasn't added EFK yet
#- k8s/logs-centralized.md
- k8s/helm.md
- k8s/namespaces.md
#- k8s/netpol.md
- k8s/whatsnext.md
# - k8s/links.md
#- kube/logs-centralized.md
- kube/helm.md
- kube/namespaces.md
- kube/whatsnext.md
# - kube/links.md
# Bridget-specific
- k8s/links-bridget.md
- shared/thankyou.md
- kube/links-bridget.md
- common/thankyou.md

View File

@@ -13,35 +13,34 @@ exclude:
- in-person
chapters:
- shared/title.md
- common/title.md
#- logistics.md
- k8s/intro.md
- shared/about-slides.md
- shared/toc.md
- - shared/prereqs.md
- k8s/versions-k8s.md
- shared/sampleapp.md
- shared/composescale.md
- shared/composedown.md
- - k8s/concepts-k8s.md
- shared/declarative.md
- k8s/declarative.md
- k8s/kubenet.md
- k8s/kubectlget.md
- k8s/setup-k8s.md
- k8s/kubectlrun.md
- - k8s/kubectlexpose.md
- k8s/ourapponkube.md
- k8s/kubectlproxy.md
- k8s/dashboard.md
- - k8s/kubectlscale.md
- k8s/daemonset.md
- k8s/rollout.md
- - k8s/logs-cli.md
- k8s/logs-centralized.md
- k8s/helm.md
- k8s/namespaces.md
- k8s/netpol.md
- k8s/whatsnext.md
- k8s/links.md
- shared/thankyou.md
- kube/intro.md
- common/about-slides.md
- common/toc.md
- - common/prereqs.md
- kube/versions-k8s.md
- common/sampleapp.md
- common/composescale.md
- common/composedown.md
- - kube/concepts-k8s.md
- common/declarative.md
- kube/declarative.md
- kube/kubenet.md
- kube/kubectlget.md
- kube/setup-k8s.md
- kube/kubectlrun.md
- - kube/kubectlexpose.md
- kube/ourapponkube.md
- kube/kubectlproxy.md
- kube/dashboard.md
- - kube/kubectlscale.md
- kube/daemonset.md
- kube/rollout.md
- - kube/logs-cli.md
- kube/logs-centralized.md
- kube/helm.md
- kube/namespaces.md
- kube/whatsnext.md
- kube/links.md
- common/thankyou.md

View File

@@ -161,7 +161,7 @@ class: pic
(This is illustrated on the first "super complicated" schema)
- In some hosted Kubernetes offerings (e.g. AKS, GKE, EKS), the control plane is invisible
- In some hosted Kubernetes offerings (e.g. GKE), the control plane is invisible
(We only "see" a Kubernetes API endpoint)

View File

@@ -95,21 +95,10 @@ Note: `--export` will remove "cluster-specific" information, i.e.:
- Change `kind: Deployment` to `kind: DaemonSet`
<!--
```bash vim rng.yml```
```wait kind: Deployment```
```keys /Deployment```
```keys ^J```
```keys cwDaemonSet```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- Save, quit
- Try to create our new resource:
```
```bash
kubectl apply -f rng.yml
```
@@ -141,7 +130,6 @@ We all knew this couldn't be that easy, right!
- remove the `replicas` field
- remove the `strategy` field (which defines the rollout mechanism for a deployment)
- remove the `progressDeadlineSeconds` field (also used by the rollout mechanism)
- remove the `status: {}` line at the end
--
@@ -431,35 +419,11 @@ Of course, option 2 offers more learning opportunities. Right?
kubectl edit daemonset rng
```
<!--
```wait Please edit the object below```
```keys /run: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys /run: rng```
```keys ^J```
```keys oisactive: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
- Update the service to add `isactive: "yes"` to its selector:
```bash
kubectl edit service rng
```
<!--
```wait Please edit the object below```
```keys /run: rng```
```keys ^J```
```keys noisactive: "yes"```
```keys ^[``` ]
```keys :wq```
```keys ^J```
-->
]
---

View File

@@ -32,11 +32,15 @@ There is an additional step to make the dashboard available from outside (we'll
- Create all the dashboard resources, with the following command:
```bash
kubectl apply -f ~/container.training/k8s/kubernetes-dashboard.yaml
kubectl apply -f https://goo.gl/Qamqab
```
]
The goo.gl URL expands to:
<br/>
.small[https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml]
---
@@ -68,11 +72,15 @@ There is an additional step to make the dashboard available from outside (we'll
- Apply the convenient YAML file, and defeat SSL protection:
```bash
kubectl apply -f ~/container.training/k8s/socat.yaml
kubectl apply -f https://goo.gl/tA7GLz
```
]
The goo.gl URL expands to:
<br/>
.small[.small[https://gist.githubusercontent.com/jpetazzo/c53a28b5b7fdae88bc3c5f0945552c04/raw/da13ef1bdd38cc0e90b7a4074be8d6a0215e1a65/socat.yaml]]
.warning[All our dashboard traffic is now clear-text, including passwords!]
---
@@ -127,7 +135,7 @@ The dashboard will then ask you which authentication you want to use.
- Grant admin privileges to the dashboard so we can see our resources:
```bash
kubectl apply -f ~/container.training/k8s/grant-admin-to-dashboard.yaml
kubectl apply -f https://goo.gl/CHsLTA
```
- Reload the dashboard and enjoy!
@@ -153,7 +161,7 @@ The dashboard will then ask you which authentication you want to use.
.exercise[
- Edit the service:
```
```bash
kubectl edit service kubernetes-dashboard
```
@@ -167,7 +175,7 @@ The dashboard will then ask you which authentication you want to use.
## Editing the `kubernetes-dashboard` service
- If we look at the [YAML](https://github.com/jpetazzo/container.training/blob/master/k8s/kubernetes-dashboard.yaml) that we loaded before, we'll get a hint
- If we look at the [YAML](https://goo.gl/Qamqab) that we loaded before, we'll get a hint
--
@@ -184,16 +192,6 @@ The dashboard will then ask you which authentication you want to use.
- Change `ClusterIP` to `NodePort`, save, and exit
<!--
```wait Please edit the object below```
```keys /ClusterIP```
```keys ^J```
```keys cwNodePort```
```keys ^[ ``` ]
```keys :wq```
```keys ^J```
-->
- Check the port that was assigned with `kubectl -n kube-system get services`
- Connect to https://oneofournodes:3xxxx/ (yes, https)

View File

@@ -34,47 +34,27 @@
## Installing Helm
- If the `helm` CLI is not installed in your environment, install it
- We need to install the `helm` CLI; then use it to deploy `tiller`
.exercise[
- Check if `helm` is installed:
```bash
helm
```
- If it's not installed, run the following command:
- Install the `helm` CLI:
```bash
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
```
]
---
## Installing Tiller
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
- They can be managed (installed, upgraded...) with the `helm` CLI
.exercise[
- Deploy Tiller:
- Deploy `tiller`:
```bash
helm init
```
- Add the `helm` completion:
```bash
. <(helm completion $(basename $SHELL))
```
]
If Tiller was already installed, don't worry: this won't break it.
At the end of the install process, you will see:
```
Happy Helming!
```
---
## Fix account permissions

View File

@@ -69,10 +69,7 @@ The `LoadBalancer` type is currently only available on AWS, Azure, and GCE.
kubectl get pods -w
```
<!--
```wait elastic-```
```keys ^C```
-->
<!-- ```keys ^C``` -->
]
@@ -131,11 +128,6 @@ Note: please DO NOT call the service `search`. It would collide with the TLD.
IP=$(kubectl get svc elastic -o go-template --template '{{ .spec.clusterIP }}')
```
<!--
```hide kubectl wait deploy elastic --for condition=available```
```hide sleep 5``` (give some time for elasticsearch to start... hopefully this is enough!)
-->
- Send a few requests:
```bash
curl http://$IP:9200/

slides/kube/kubectlproxy.md (new file, 117 lines)
View File

@@ -0,0 +1,117 @@
# Accessing internal services with `kubectl proxy`
- `kubectl proxy` runs a proxy in the foreground
- This proxy lets us access the Kubernetes API without authentication
(`kubectl proxy` adds our credentials on the fly to the requests)
- This proxy lets us access the Kubernetes API over plain HTTP
- This is a great tool to learn and experiment with the Kubernetes API
- The Kubernetes API also gives us a proxy to HTTP and HTTPS services
- Therefore, we can use `kubectl proxy` to access internal services
(Without using a `NodePort` or similar service)
---
## Secure by default
- By default, the proxy listens on port 8001
(But this can be changed, or we can tell `kubectl proxy` to pick a port)
- By default, the proxy binds to `127.0.0.1`
(Making it unreachable from other machines, for security reasons)
- By default, the proxy only accepts connections from:
`^localhost$,^127\.0\.0\.1$,^\[::1\]$`
- This is great when running `kubectl proxy` locally
- Not-so-great when running it on a remote machine
---
## Running `kubectl proxy` on a remote machine
- We are going to bind to `INADDR_ANY` instead of `127.0.0.1`
- We are going to accept connections from any address
.exercise[
- Run an open proxy to the Kubernetes API:
```bash
kubectl proxy --port=8888 --address=0.0.0.0 --accept-hosts=.*
```
]
.warning[Anyone can now do whatever they want with our Kubernetes cluster!
<br/>
(Don't do this on a real cluster!)]
---
## Viewing available API routes
- The default route (i.e. `/`) shows a list of available API endpoints
.exercise[
- Point your browser to the IP address of the node running `kubectl proxy`, port 8888
]
The result should look like this:
```json
{
"paths": [
"/api",
"/api/v1",
"/apis",
"/apis/",
"/apis/admissionregistration.k8s.io",
```
---
## Connecting to a service through the proxy
- The API can proxy HTTP and HTTPS requests by accessing a special route:
```
/api/v1/namespaces/`name_of_namespace`/services/`name_of_service`/proxy
```
- Since we now have access to the API, we can use this special route
.exercise[
- Access the `hasher` service through the special proxy route:
```open
http://`X.X.X.X`:8888/api/v1/namespaces/default/services/hasher/proxy
```
]
You should see the banner of the hasher service: `HASHER running on ...`
---
## Stopping the proxy
- Remember: as it is running right now, `kubectl proxy` gives open access to our cluster
.exercise[
- Stop the `kubectl proxy` process with Ctrl-C
]

View File

@@ -26,8 +26,6 @@
kubectl run pingpong --image alpine ping 1.1.1.1
```
<!-- ```hide kubectl wait deploy/pingpong --for condition=available``` -->
]
--
@@ -198,13 +196,10 @@ We could! But the *deployment* would notice it right away, and scale back to the
<!--
```wait Running```
```keys ^C```
```hide kubectl wait deploy pingpong --for condition=available```
```keys kubectl delete pod ping```
```copypaste pong-..........-.....```
-->
- Destroy a pod:
```
```bash
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
```
]

View File

@@ -10,12 +10,7 @@
kubectl get deployments -w
```
<!--
```wait RESTARTS```
```keys ^C```
```wait AVAILABLE```
```keys ^C```
-->
<!-- ```keys ^C``` -->
- Now, create more `worker` replicas:
```bash

View File

@@ -40,12 +40,12 @@
- Load the YAML file into our cluster:
```bash
kubectl apply -f ~/container.training/k8s/efk.yaml
kubectl apply -f https://goo.gl/MUZhE4
```
]
If we [look at the YAML file](https://github.com/jpetazzo/container.training/blob/master/k8s/efk.yaml), we see that
If we [look at the YAML file](https://goo.gl/MUZhE4), we see that
it creates a daemon set, two deployments, two services,
and a few roles and role bindings (to give fluentd the required permissions).
@@ -113,7 +113,7 @@ and a few roles and role bindings (to give fluentd the required permissions).
- The first time you connect to Kibana, you must "configure an index pattern"
- Just use the one that is suggested, `@timestamp`.red[*]
- Just use the one that is suggested, `@timestamp`
- Then click "Discover" (in the top-left corner)
@@ -123,9 +123,6 @@ and a few roles and role bindings (to give fluentd the required permissions).
`kubernetes.host`, `kubernetes.pod_name`, `stream`, `log`
.red[*]If you don't see `@timestamp`, it's probably because no logs exist yet.
<br/>Wait a bit, and double-check the logging pipeline!
---
## Caveat emptor

View File

@@ -47,25 +47,23 @@ Exactly what we need!
## Installing Stern
- Run `stern` (without arguments) to check if it's installed:
- For simplicity, let's just grab a binary release
```
$ stern
Tail multiple pods and containers from Kubernetes
.exercise[
Usage:
stern pod-query [flags]
```
- If it is not installed, the easiest method is to download a [binary release](https://github.com/wercker/stern/releases)
- The following commands will install Stern on a Linux Intel 64 bits machine:
- Download a binary release from GitHub:
```bash
sudo curl -L -o /usr/local/bin/stern \
https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
https://github.com/wercker/stern/releases/download/1.6.0/stern_linux_amd64
sudo chmod +x /usr/local/bin/stern
```
]
These installation instructions will work on our clusters, since they are Linux amd64 VMs.
However, you will have to adapt them if you want to install Stern on your local machine.
---
## Using Stern

View File

@@ -114,8 +114,6 @@ In this part, we will:
.exercise[
<!-- ```hide kubectl wait deploy/registry --for condition=available```-->
- View the repositories currently held in our registry:
```bash
curl $REGISTRY/v2/_catalog
@@ -277,11 +275,6 @@ class: extra-details
.exercise[
<!-- ```hide
kubectl wait deploy/rng --for condition=available
kubectl wait deploy/worker --for condition=available
``` -->
- Look at some logs:
```bash
kubectl logs deploy/rng
@@ -335,8 +328,9 @@ kubectl wait deploy/worker --for condition=available
(Give it about 10 seconds to recover)
<!--
```wait units of work done, updating hash counter```
```keys ^C```
```keys
^C
```
-->
]

View File

@@ -96,10 +96,7 @@
kubectl get deployments -w
```
<!--
```wait NAME```
```keys ^C```
-->
<!-- ```keys ^C``` -->
- Update `worker` either with `kubectl edit`, or by running:
```bash
@@ -153,11 +150,6 @@ That rollout should be pretty quick. What shows in the web UI?
kubectl rollout status deploy worker
```
<!--
```wait Waiting for deployment```
```keys ^C```
-->
]
--

View File

@@ -10,7 +10,7 @@
2. Install Kubernetes packages
3. Run `kubeadm init` on the first node (it deploys the control plane on that node)
3. Run `kubeadm init` on the master node
4. Set up Weave (the overlay network)
<br/>

View File

@@ -93,37 +93,13 @@ And *then* it is time to look at orchestration!
---
## Logging
## Logging and metrics
- Logging is delegated to the container engine
- Logs are exposed through the API
- Logs are also accessible through local files (`/var/log/containers`)
- Log shipping to a central platform is usually done through these files
(e.g. with an agent bind-mounting the log directory)
---
## Metrics
- The kubelet embeds [cAdvisor](https://github.com/google/cadvisor), which exposes container metrics
(cAdvisor might be separated in the future for more flexibility)
- It is a good idea to start with [Prometheus](https://prometheus.io/)
(even if you end up using something else)
- Starting from Kubernetes 1.8, we can use the [Metrics API](https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/)
- [Heapster](https://github.com/kubernetes/heapster) was a popular add-on
(but is being [deprecated](https://github.com/kubernetes/heapster/blob/master/docs/deprecation.md) starting with Kubernetes 1.11)
- Metrics are typically handled with [Prometheus](https://prometheus.io/)
([Heapster](https://github.com/kubernetes/heapster) is a popular add-on)
---

View File

@@ -2,15 +2,30 @@
- Hello! We are:
- .emoji[✨] Bridget ([@bridgetkromhout](https://twitter.com/bridgetkromhout))
- .emoji[✨] Bridget Kromhout ([@bridgetkromhout](https://twitter.com/bridgetkromhout))
- .emoji[🌟] Joe ([@joelaha](https://twitter.com/joelaha))
- .emoji[🌟] Joe Laha ([@joelaha](https://twitter.com/joelaha))
- The workshop will run from 13:30-16:45
- .emoji[💁🏻‍♀️] Karen Chu ([@karenhchu](https://twitter.com/karenhchu))
- There will be a break from 15:00-15:15
- .emoji[🐳] Jérôme Petazzoni ([@jpetazzo](https://twitter.com/jpetazzo)) (joining us from Berlin in the chat room!)
- The workshop will run from 9:00-12:30
- There will be a break from 10:30-11:00
- Feel free to interrupt for questions at any time
- *Especially when you see full screen container pictures!*
---
## Say hi!
- We encourage networking at [#oscon](https://twitter.com/hashtag/oscon?f=tweets&vertical=default&src=hash)
- Take a minute to introduce yourself to your neighbors
- Tell them where you're from (where you're based out of & what org you work at)
- Share what you're hoping to learn in this session! .emoji[✨]

View File

@@ -1,17 +0,0 @@
.remark-slide-content:not(.pic) {
  background-repeat: no-repeat;
  background-position: 99% 1%;
  background-size: 8%;
  background-image: url(https://enix.io/static/img/logos/logo-domain-cropped.png);
}

div.extra-details:not(.pic) {
  background-image: url("images/extra-details.png"), url(https://enix.io/static/img/logos/logo-domain-cropped.png);
  background-position: 0.5% 1%, 99% 1%;
  background-size: 4%, 8%;
}

.remark-slide-content:not(.pic) div.remark-slide-number {
  top: 16px;
  right: 112px
}

Some files were not shown because too many files have changed in this diff.