Mirror of https://github.com/jpetazzo/container.training.git
Synced 2026-02-15 10:09:56 +00:00

Compare commits (91 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 2381510a0b | |
| | 732f06729f | |
| | 5687b204cd | |
| | db8e8377ac | |
| | 510a37be44 | |
| | 29a925a50d | |
| | 7d67e23e89 | |
| | 651e6b720b | |
| | c8cd845b7d | |
| | 230bd73597 | |
| | 7217c0ee1d | |
| | 51882896d4 | |
| | 77d455d894 | |
| | 39532c7547 | |
| | 4f9c8275d9 | |
| | f11aae2514 | |
| | f1e9efc38c | |
| | 975cc4f7df | |
| | 01243280a2 | |
| | e652c3639d | |
| | 1e0954d9b4 | |
| | bb21f9bbc9 | |
| | 25466e7950 | |
| | 78026ff9b8 | |
| | 60c7ef4e53 | |
| | 55952934ed | |
| | 3eaa844c55 | |
| | f9d31f4c30 | |
| | ec037e422b | |
| | 73f66f25d8 | |
| | 28174b6cf9 | |
| | a80c095a07 | |
| | 374574717d | |
| | efce5d1ad4 | |
| | 4eec91a9e6 | |
| | 57166f33aa | |
| | f1ebb1f0fb | |
| | 8182e4df96 | |
| | 6f3580820c | |
| | 7b7fd2a4b4 | |
| | f74addd0ca | |
| | 21ba3b7713 | |
| | 4eca15f822 | |
| | 4205f619cf | |
| | c3dff823ef | |
| | 39876d1388 | |
| | 7e34aa0287 | |
| | 3bdafed38e | |
| | 3d438ff304 | |
| | bcd1f37085 | |
| | ba928e59fc | |
| | 62c01ef7d6 | |
| | a71347e328 | |
| | f235cfa13c | |
| | 45b397682b | |
| | 858ad02973 | |
| | defeef093d | |
| | b45615e2c3 | |
| | b158babb7f | |
| | 59b7386b91 | |
| | c05bcd23d9 | |
| | 3cb91855c8 | |
| | dc0850ef3e | |
| | ffdd7fda45 | |
| | 83b2133573 | |
| | d04856f964 | |
| | 8373d5302f | |
| | 7d7cb0eadb | |
| | c00c87f8f2 | |
| | f599462ad7 | |
| | 018282f392 | |
| | 23b3c1c05a | |
| | 62686d0b7a | |
| | 54288502a2 | |
| | efc045e40b | |
| | 6e9b16511f | |
| | 81b6e60a8c | |
| | 5baaf7e00a | |
| | d4d460397f | |
| | f66b6b2ee3 | |
| | fb7f7fd8c8 | |
| | dc98fa21a9 | |
| | 6b662d3e4c | |
| | 7069682c8e | |
| | 3b1d5b93a8 | |
| | 611fe55e90 | |
| | 481272ac22 | |
| | 9069e2d7db | |
| | 1144c16a4c | |
| | 9b2846633c | |
| | db88c0a5bf | |
k8s/consul.yaml (new file, 62 lines)
@@ -0,0 +1,62 @@
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.2.2"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=consul-0.consul.$(NAMESPACE).svc.cluster.local"
        - "-retry-join=consul-1.consul.$(NAMESPACE).svc.cluster.local"
        - "-retry-join=consul-2.consul.$(NAMESPACE).svc.cluster.local"
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
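A quick smoke test for this manifest (a sketch; it assumes at least three schedulable nodes, since the required anti-affinity places one replica per node):

    kubectl apply -f k8s/consul.yaml
    kubectl rollout status statefulset consul
    kubectl exec consul-0 -- consul members   # should list three servers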
k8s/docker-build.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: build-image
spec:
  restartPolicy: OnFailure
  containers:
  - name: docker-build
    image: docker
    env:
    - name: REGISTRY_PORT
      value: "30000"
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      mkdir /workspace &&
      git clone https://github.com/jpetazzo/container.training /workspace &&
      docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
      docker push localhost:$REGISTRY_PORT/worker
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-socket
    hostPath:
      path: /var/run/docker.sock
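This pod builds with the node's Docker Engine through the mounted socket, so the image lands in the node's local cache before being pushed. REGISTRY_PORT has to match the NodePort where the registry is published; 30000 is only a placeholder. A possible run, assuming a registry is already up on that port:

    kubectl apply -f k8s/docker-build.yaml
    kubectl logs -f build-image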
k8s/efk.yaml (new file, 222 lines)
@@ -0,0 +1,222 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    k8s-app: fluentd-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:elasticsearch
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        # X-Pack Authentication
        # =====================
        - name: FLUENT_ELASTICSEARCH_USER
          value: "elastic"
        - name: FLUENT_ELASTICSEARCH_PASSWORD
          value: "changeme"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: elasticsearch
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5.6.8
        imagePullPolicy: IfNotPresent
        name: elasticsearch
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    run: elasticsearch
  sessionAffinity: None
  type: ClusterIP

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: kibana
  name: kibana
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: kibana
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5.6.8
        imagePullPolicy: Always
        name: kibana
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: kibana
  name: kibana
  selfLink: /api/v1/namespaces/default/services/kibana
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    run: kibana
  sessionAffinity: None
  type: NodePort
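These manifests target the Kubernetes 1.11 era (the extensions/v1beta1 and rbac v1beta1 apiVersions have since been removed). On a matching cluster, one way to reach Kibana once the pods are running:

    kubectl apply -f k8s/efk.yaml
    kubectl get svc kibana -o jsonpath='{.spec.ports[0].nodePort}'

then browse to any node's IP on that port.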
k8s/grant-admin-to-dashboard.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
k8s/haproxy.cfg (new file, 18 lines)
@@ -0,0 +1,18 @@
global
    daemon
    maxconn 256

defaults
    mode tcp
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

frontend the-frontend
    bind *:80
    default_backend the-backend

backend the-backend
    server google.com-80 google.com:80 maxconn 32 check
    server bing.com-80 bing.com:80 maxconn 32 check
k8s/haproxy.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: haproxy
spec:
  volumes:
  - name: config
    configMap:
      name: haproxy
  containers:
  - name: haproxy
    image: haproxy
    volumeMounts:
    - name: config
      mountPath: /usr/local/etc/haproxy/
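The pod mounts a ConfigMap named haproxy over /usr/local/etc/haproxy/, where the haproxy image looks for haproxy.cfg; creating the ConfigMap from the file above gives the key that name automatically:

    kubectl create configmap haproxy --from-file=k8s/haproxy.cfg
    kubectl apply -f k8s/haproxy.yaml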
k8s/ingress.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cheddar
spec:
  rules:
  - host: cheddar.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: cheddar
          servicePort: 80
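A.B.C.D is a placeholder for a node's IP address (nip.io resolves cheddar.A.B.C.D.nip.io back to A.B.C.D). One way to substitute it on the fly, assuming the first node's InternalIP is the address you want:

    NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
    sed "s/A.B.C.D/$NODE_IP/" k8s/ingress.yaml | kubectl apply -f -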
k8s/kaniko-build.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: kaniko-build
spec:
  initContainers:
  - name: git-clone
    image: alpine
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      git clone git://github.com/jpetazzo/container.training /workspace
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  containers:
  - name: build-image
    image: gcr.io/kaniko-project/executor:latest
    args:
    - "--context=/workspace/dockercoins/rng"
    - "--skip-tls-verify"
    - "--destination=registry:5000/rng-kaniko:latest"
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  volumes:
  - name: workspace
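Kaniko builds the image entirely in userspace (no Docker socket needed) and pushes straight to registry:5000, which assumes an in-cluster registry Service by that name. To watch the build:

    kubectl apply -f k8s/kaniko-build.yaml
    kubectl logs -f kaniko-build -c build-image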
k8s/kubernetes-dashboard.yaml (new file, 167 lines)
@@ -0,0 +1,167 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
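The grant-admin-to-dashboard binding above gives the dashboard's ServiceAccount full cluster-admin, which is acceptable on a throwaway training cluster but should never be done on a real one. Typical sequence:

    kubectl apply -f k8s/kubernetes-dashboard.yaml
    kubectl apply -f k8s/grant-admin-to-dashboard.yaml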
k8s/netpol-allow-testcurl-for-testweb.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl
k8s/netpol-deny-all-for-testweb.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress: []
k8s/netpol-dockercoins.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector:
    matchLabels:
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      run: webui
  ingress:
  - from: []
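A possible way to see the testweb policies in action (this assumes a CNI plugin that enforces NetworkPolicy, and a pre-1.18 kubectl, where `kubectl run` sets the run= label these selectors match):

    kubectl run testweb --image=nginx
    WEB_IP=$(kubectl get pod -l run=testweb -o jsonpath='{.items[0].status.podIP}')
    kubectl apply -f k8s/netpol-deny-all-for-testweb.yaml
    kubectl run testcurl --rm -it --image=centos -- curl --connect-timeout 3 $WEB_IP   # times out
    kubectl apply -f k8s/netpol-allow-testcurl-for-testweb.yaml                        # re-run: succeeds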
k8s/nginx-with-volume.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
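The www volume has no explicit source, so the API server defaults it to an emptyDir shared by both containers: the git container clones into it once and exits (which restartPolicy: OnFailure tolerates), and nginx serves it. To check:

    kubectl apply -f k8s/nginx-with-volume.yaml
    kubectl exec nginx-with-volume -c nginx -- ls /usr/share/nginx/html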
k8s/portworx.yaml (new file, 580 lines)
@@ -0,0 +1,580 @@
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true
apiVersion: v1
kind: ConfigMap
metadata:
  name: stork-config
  namespace: kube-system
data:
  policy.cfg: |-
    {
      "kind": "Policy",
      "apiVersion": "v1",
      "extenders": [
        {
          "urlPrefix": "http://stork-service.kube-system.svc:8099",
          "apiVersion": "v1beta1",
          "filterVerb": "filter",
          "prioritizeVerb": "prioritize",
          "weight": 5,
          "enableHttps": false,
          "nodeCacheCapable": false
        }
      ]
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: stork-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-role
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshots"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshotdatas"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "update"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
  resources: ["deployments", "deployments/extensions"]
  verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
  resources: ["statefulsets", "statefulsets/extensions"]
  verbs: ["list", "get", "watch", "patch", "update", "initialize"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-role-binding
subjects:
- kind: ServiceAccount
  name: stork-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-role
  apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
  name: stork-service
  namespace: kube-system
spec:
  selector:
    name: stork
  ports:
  - protocol: TCP
    port: 8099
    targetPort: 8099
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  labels:
    tier: control-plane
  name: stork
  namespace: kube-system
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  replicas: 3
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        name: stork
        tier: control-plane
    spec:
      containers:
      - command:
        - /stork
        - --driver=pxd
        - --verbose
        - --leader-elect=true
        - --health-monitor-interval=120
        imagePullPolicy: Always
        image: openstorage/stork:1.1.3
        resources:
          requests:
            cpu: '0.1'
        name: stork
      hostPID: false
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork
            topologyKey: "kubernetes.io/hostname"
      serviceAccountName: stork-account
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: stork-snapshot-sc
provisioner: stork-snapshot
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: stork-scheduler-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
- apiGroups: [""]
  resourceNames: ["kube-scheduler"]
  resources: ["endpoints"]
  verbs: ["delete", "get", "patch", "update"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete", "get", "list", "watch"]
- apiGroups: [""]
  resources: ["bindings", "pods/binding"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["pods/status"]
  verbs: ["patch", "update"]
- apiGroups: [""]
  resources: ["replicationcontrollers", "services"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["app", "extensions"]
  resources: ["replicasets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims", "persistentvolumes"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role-binding
subjects:
- kind: ServiceAccount
  name: stork-scheduler-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-scheduler-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
    name: stork-scheduler
  name: stork-scheduler
  namespace: kube-system
spec:
  replicas: 3
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        name: stork-scheduler
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-scheduler
        - --address=0.0.0.0
        - --leader-elect=true
        - --scheduler-name=stork
        - --policy-configmap=stork-config
        - --policy-configmap-namespace=kube-system
        - --lock-object-name=stork-scheduler
        image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10251
          initialDelaySeconds: 15
        name: stork-scheduler
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10251
        resources:
          requests:
            cpu: '0.1'
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork-scheduler
            topologyKey: "kubernetes.io/hostname"
      hostPID: false
      serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
  name: portworx-service
  namespace: kube-system
  labels:
    name: portworx
spec:
  selector:
    name: portworx
  ports:
  - name: px-api
    protocol: TCP
    port: 9001
    targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-get-put-list-role
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete", "get", "list"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims", "persistentvolumes"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
  resources: ["podsecuritypolicies"]
  resourceNames: ["privileged"]
  verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-role-binding
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: node-get-put-list-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
  name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role
  namespace: portworx
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role-binding
  namespace: portworx
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: portworx
  namespace: kube-system
  annotations:
    portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true"
spec:
  minReadySeconds: 0
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        name: portworx
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: px/enabled
                operator: NotIn
                values:
                - "false"
              - key: node-role.kubernetes.io/master
                operator: DoesNotExist
      hostNetwork: true
      hostPID: false
      containers:
      - name: portworx
        image: portworx/oci-monitor:1.4.2.2
        imagePullPolicy: Always
        args:
           ["-c", "px-workshop", "-s", "/dev/loop0", "-b",
            "-x", "kubernetes"]
        env:
        - name: "PX_TEMPLATE_VERSION"
          value: "v4"

        livenessProbe:
          periodSeconds: 30
          initialDelaySeconds: 840 # allow image pull in slow networks
          httpGet:
            host: 127.0.0.1
            path: /status
            port: 9001
        readinessProbe:
          periodSeconds: 10
          httpGet:
            host: 127.0.0.1
            path: /health
            port: 9015
        terminationMessagePath: "/tmp/px-termination-log"
        securityContext:
          privileged: true
        volumeMounts:
        - name: dockersock
          mountPath: /var/run/docker.sock
        - name: etcpwx
          mountPath: /etc/pwx
        - name: optpwx
          mountPath: /opt/pwx
        - name: proc1nsmount
          mountPath: /host_proc/1/ns
        - name: sysdmount
          mountPath: /etc/systemd/system
        - name: diagsdump
          mountPath: /var/cores
        - name: journalmount1
          mountPath: /var/run/log
          readOnly: true
        - name: journalmount2
          mountPath: /var/log
          readOnly: true
        - name: dbusmount
          mountPath: /var/run/dbus
      restartPolicy: Always
      serviceAccountName: px-account
      volumes:
      - name: dockersock
        hostPath:
          path: /var/run/docker.sock
      - name: etcpwx
        hostPath:
          path: /etc/pwx
      - name: optpwx
        hostPath:
          path: /opt/pwx
      - name: proc1nsmount
        hostPath:
          path: /proc/1/ns
      - name: sysdmount
        hostPath:
          path: /etc/systemd/system
      - name: diagsdump
        hostPath:
          path: /var/cores
      - name: journalmount1
        hostPath:
          path: /var/run/log
      - name: journalmount2
        hostPath:
          path: /var/log
      - name: dbusmount
        hostPath:
          path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-lh-account
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: px-lh-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-lh-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    nodePort: 32678
  - name: https
    port: 443
    nodePort: 32679
  selector:
    tier: px-web-console
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      tier: px-web-console
  replicas: 1
  template:
    metadata:
      labels:
        tier: px-web-console
    spec:
      initContainers:
      - name: config-init
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "init"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      containers:
      - name: px-lighthouse
        image: portworx/px-lighthouse:1.5.0
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        - containerPort: 443
        volumeMounts:
        - name: config
          mountPath: /config/lh
      - name: config-sync
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "sync"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      serviceAccountName: px-lh-account
      volumes:
      - name: config
        emptyDir: {}
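This is a generated installer snapshot (see the SOURCE URL in the first line) pinned to Kubernetes 1.11.2 and a /dev/loop0 backing device, so it is only expected to work on the workshop VMs it was generated for. To watch it come up:

    kubectl apply -f k8s/portworx.yaml
    kubectl -n kube-system rollout status daemonset portworx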
k8s/postgres.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      schedulerName: stork
      containers:
      - name: postgres
        image: postgres:10.5
        volumeMounts:
        - mountPath: /var/lib/postgresql
          name: postgres
  volumeClaimTemplates:
  - metadata:
      name: postgres
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
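schedulerName: stork hands placement to the Stork scheduler deployed by k8s/portworx.yaml, and the volumeClaimTemplates rely on a default StorageClass (the portworx-replicated class below declares itself default). A quick check that the claim binds:

    kubectl apply -f k8s/postgres.yaml
    kubectl get pvc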
k8s/registry.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: registry
spec:
  containers:
  - name: registry
    image: registry
    env:
    - name: REGISTRY_HTTP_ADDR
      valueFrom:
        configMapKeyRef:
          name: registry
          key: http.addr
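The pod expects a ConfigMap named registry with an http.addr key feeding REGISTRY_HTTP_ADDR. A minimal setup (the 0.0.0.0:5000 listen address is an assumption; use whatever the rest of the setup expects):

    kubectl create configmap registry --from-literal=http.addr=0.0.0.0:5000
    kubectl apply -f k8s/registry.yaml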
k8s/socat.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      run: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: socat
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
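This relays the dashboard's HTTPS endpoint over plain HTTP on a NodePort (verify=0 also skips certificate checks), which is convenient for a workshop and insecure anywhere else. To find the port:

    kubectl -n kube-system get svc socat -o jsonpath='{.spec.ports[0].nodePort}'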
k8s/storage-class.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: portworx-replicated
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "2"
  priority_io: "high"
k8s/traefik.yaml (new file, 100 lines)
@@ -0,0 +1,100 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 8080
    name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
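Because the DaemonSet uses hostNetwork with hostPort 80, Traefik answers on port 80 of every node, so the cheddar Ingress above can be tested against any node IP once its host rule matches:

    kubectl apply -f k8s/traefik.yaml
    curl -H "Host: cheddar.A.B.C.D.nip.io" http://NODE_IP/   # substitute the same address used in the Ingress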
@@ -93,7 +93,7 @@ wrap Run this program in a container
 - The `./workshopctl` script can be executed directly.
 - It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
 - During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
-- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. For now, this is hard coded.
+- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.

 ### Example Steps to Launch a Batch of AWS Instances for a Workshop
@@ -85,7 +85,7 @@ img {
 <tr><td>login:</td></tr>
 <tr><td class="logpass">docker</td></tr>
 <tr><td>password:</td></tr>
-<tr><td class="logpass">training</td></tr>
+<tr><td class="logpass">{{ docker_user_password }}</td></tr>
 </table>

 </p>
@@ -168,6 +168,22 @@ _cmd_kube() {
     sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
     fi"

+    # Install stern
+    pssh "
+    if [ ! -x /usr/local/bin/stern ]; then
+        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
+        sudo chmod +x /usr/local/bin/stern
+        stern --completion bash | sudo tee /etc/bash_completion.d/stern
+    fi"
+
+    # Install helm
+    pssh "
+    if [ ! -x /usr/local/bin/helm ]; then
+        curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
+        helm completion bash | sudo tee /etc/bash_completion.d/helm
+    fi"
+
     sep "Done"
 }
@@ -13,6 +13,7 @@ COMPOSE_VERSION = config["compose_version"]
 MACHINE_VERSION = config["machine_version"]
 CLUSTER_SIZE = config["clustersize"]
 ENGINE_VERSION = config["engine_version"]
+DOCKER_USER_PASSWORD = config["docker_user_password"]

 #################################

@@ -54,9 +55,9 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))

 ipv4 = open("/tmp/ipv4").read()

-# Add a "docker" user with password "training"
+# Add a "docker" user with password coming from the settings
 system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
-system("echo docker:training | sudo chpasswd")
+system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))

 # Fancy prompt courtesy of @soulshake.
 system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
|
||||
@@ -22,3 +22,6 @@ engine_version: test
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.18.0
|
||||
machine_version: 0.13.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -20,5 +20,8 @@ paper_margin: 0.2in
 engine_version: stable

 # These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.21.1
-machine_version: 0.14.0
+compose_version: 1.22.0
+machine_version: 0.15.0
+
+# Password used to connect with the "docker" user
+docker_user_password: training
@@ -85,7 +85,7 @@ img {
 <tr><td>login:</td></tr>
 <tr><td class="logpass">docker</td></tr>
 <tr><td>password:</td></tr>
-<tr><td class="logpass">training</td></tr>
+<tr><td class="logpass">{{ docker_user_password }}</td></tr>
 </table>

 </p>
@@ -22,3 +22,6 @@ engine_version: stable
 # These correspond to the version numbers visible on their respective GitHub release pages
 compose_version: 1.21.1
 machine_version: 0.14.0
+
+# Password used to connect with the "docker" user
+docker_user_password: training
@@ -22,3 +22,6 @@ engine_version: stable
 # These correspond to the version numbers visible on their respective GitHub release pages
 compose_version: 1.21.1
 machine_version: 0.14.0
+
+# Password used to connect with the "docker" user
+docker_user_password: training
@@ -1 +1 @@
-/ /kube-halfday.yml.html 200!
+/ /weka.yml.html 200!
@@ -29,6 +29,10 @@ class State(object):
         self.interactive = True
         self.verify_status = False
         self.simulate_type = True
+        self.switch_desktop = False
+        self.sync_slides = False
+        self.open_links = False
+        self.run_hidden = True
         self.slide = 1
         self.snippet = 0

@@ -37,6 +41,10 @@ class State(object):
         self.interactive = bool(data["interactive"])
         self.verify_status = bool(data["verify_status"])
         self.simulate_type = bool(data["simulate_type"])
+        self.switch_desktop = bool(data["switch_desktop"])
+        self.sync_slides = bool(data["sync_slides"])
+        self.open_links = bool(data["open_links"])
+        self.run_hidden = bool(data["run_hidden"])
         self.slide = int(data["slide"])
         self.snippet = int(data["snippet"])

@@ -46,6 +54,10 @@ class State(object):
             interactive=self.interactive,
             verify_status=self.verify_status,
             simulate_type=self.simulate_type,
+            switch_desktop=self.switch_desktop,
+            sync_slides=self.sync_slides,
+            open_links=self.open_links,
+            run_hidden=self.run_hidden,
             slide=self.slide,
             snippet=self.snippet,
             ), f, default_flow_style=False)

@@ -122,14 +134,20 @@ class Slide(object):


 def focus_slides():
+    if not state.switch_desktop:
+        return
     subprocess.check_output(["i3-msg", "workspace", "3"])
     subprocess.check_output(["i3-msg", "workspace", "1"])

 def focus_terminal():
+    if not state.switch_desktop:
+        return
     subprocess.check_output(["i3-msg", "workspace", "2"])
     subprocess.check_output(["i3-msg", "workspace", "1"])

 def focus_browser():
+    if not state.switch_desktop:
+        return
     subprocess.check_output(["i3-msg", "workspace", "4"])
     subprocess.check_output(["i3-msg", "workspace", "1"])

@@ -307,17 +325,21 @@ while True:
     slide = slides[state.slide]
     snippet = slide.snippets[state.snippet-1] if state.snippet else None
     click.clear()
-    print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}]"
+    print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
+          "[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
           .format(state.slide, len(slides)-1,
                   state.snippet, len(slide.snippets) if slide.snippets else 0,
-                  state.simulate_type, state.verify_status))
+                  state.simulate_type, state.verify_status,
+                  state.switch_desktop, state.sync_slides,
+                  state.open_links, state.run_hidden))
     print(hrule())
     if snippet:
         print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
         focus_terminal()
     else:
         print(slide.content)
-        subprocess.check_output(["./gotoslide.js", str(slide.number)])
+        if state.sync_slides:
+            subprocess.check_output(["./gotoslide.js", str(slide.number)])
         focus_slides()
     print(hrule())
     if state.interactive:

@@ -326,6 +348,10 @@ while True:
         print("n/→ Next")
         print("s Simulate keystrokes")
         print("v Validate exit status")
+        print("d Switch desktop")
+        print("k Sync slides")
+        print("o Open links")
+        print("h Run hidden commands")
         print("g Go to a specific slide")
         print("q Quit")
         print("c Continue non-interactively until next error")

@@ -341,6 +367,14 @@ while True:
             state.simulate_type = not state.simulate_type
         elif command == "v":
             state.verify_status = not state.verify_status
+        elif command == "d":
+            state.switch_desktop = not state.switch_desktop
+        elif command == "k":
+            state.sync_slides = not state.sync_slides
+        elif command == "o":
+            state.open_links = not state.open_links
+        elif command == "h":
+            state.run_hidden = not state.run_hidden
         elif command == "g":
             state.slide = click.prompt("Enter slide number", type=int)
             state.snippet = 0

@@ -366,7 +400,7 @@ while True:
     logging.info("Running with method {}: {}".format(method, data))
     if method == "keys":
         send_keys(data)
-    elif method == "bash":
+    elif method == "bash" or (method == "hide" and state.run_hidden):
         # Make sure that we're ready
         wait_for_prompt()
         # Strip leading spaces

@@ -405,11 +439,12 @@ while True:
         screen = capture_pane()
         url = data.replace("/node1", "/{}".format(IPADDR))
         # This should probably be adapted to run on different OS
-        subprocess.check_output(["xdg-open", url])
-        focus_browser()
-        if state.interactive:
-            print("Press any key to continue to next step...")
-            click.getchar()
+        if state.open_links:
+            subprocess.check_output(["xdg-open", url])
+            focus_browser()
+            if state.interactive:
+                print("Press any key to continue to next step...")
+                click.getchar()
     else:
         logging.warning("Unknown method {}: {!r}".format(method, data))
     move_forward()
slides/autopilot/requirements.txt (new file, 1 line)
@@ -0,0 +1 @@
click
@@ -1,21 +0,0 @@ (deleted file)
-class: title, self-paced
-
-@@TITLE@@
-
-.nav[*Self-paced version*]
-
----
-
-class: title, in-person
-
-@@TITLE@@<br/></br>
-
-.footnote[
-**Be kind to the WiFi!**<br/>
-<!-- *Use the 5G network.* -->
-*Don't use your hotspot.*<br/>
-*Don't stream videos or download big files during the workshop.*<br/>
-*Thank you!*
-
-**Slides: @@SLIDES@@**
-]
@@ -312,7 +312,7 @@ CMD gunicorn --bind 0.0.0.0:5000 --workers 10 counter:app
 EXPOSE 5000
 ```

-(Source: [traininghweels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
+(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))

 ---
@@ -51,7 +51,7 @@ for line in open(sys.argv[1]):

 state.show()

-for chapter in sorted(state.chapters):
+for chapter in sorted(state.chapters, key=lambda f: int(f.split("-")[1])):
     chapter_size = sum(state.sections[s] for s in state.chapters[chapter])
     print("{}\t{}\t{}".format("total size for", chapter, chapter_size))
@@ -113,7 +113,13 @@ for item in items:
                   1: "st", 2: "nd", 3: "rd",
                   21: "st", 22: "nd", 23: "rd",
                   31: "st"}.get(date.day, "th")
-        item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
+        # %e is a non-standard extension (it displays the day, but without a
+        # leading zero). If strftime fails with ValueError, try to fall back
+        # on %d (which displays the day but with a leading zero when needed).
+        try:
+            item["prettydate"] = date.strftime("%B %e{}, %Y").format(suffix)
+        except ValueError:
+            item["prettydate"] = date.strftime("%B %d{}, %Y").format(suffix)

 today = datetime.date.today()
 coming_soon = [i for i in items if i.get("date") and i["date"] >= today]
@@ -1,3 +1,43 @@
+- date: 2018-11-23
+  city: Copenhagen
+  country: dk
+  event: GOTO
+  title: Build Container Orchestration with Docker Swarm
+  speaker: bretfisher
+  attend: https://gotocph.com/2018/workshops/121
+
+- date: 2018-11-08
+  city: San Francisco, CA
+  country: us
+  event: QCON
+  title: Introduction to Docker and Containers
+  speaker: jpetazzo
+  attend: https://qconsf.com/sf2018/workshop/introduction-docker-and-containers
+
+- date: 2018-11-09
+  city: San Francisco, CA
+  country: us
+  event: QCON
+  title: Getting Started With Kubernetes and Container Orchestration
+  speaker: jpetazzo
+  attend: https://qconsf.com/sf2018/workshop/getting-started-kubernetes-and-container-orchestration
+
+- date: 2018-10-31
+  city: London, UK
+  country: uk
+  event: Velocity EU
+  title: Kubernetes 101
+  speaker: bridgetkromhout
+  attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71149
+
+- date: 2018-10-30
+  city: London, UK
+  country: uk
+  event: Velocity EU
+  title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
+  speaker: bretfisher
+  attend: https://conferences.oreilly.com/velocity/vl-eu/public/schedule/detail/71231
+
 - date: 2018-07-12
   city: Minneapolis, MN
   country: us

@@ -23,6 +63,14 @@
   speaker: jpetazzo
   attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/69875

+- date: 2018-09-30
+  city: New York, NY
+  country: us
+  event: Velocity
+  title: "Docker Zero to Hero: Docker, Compose and Production Swarm"
+  speaker: bretfisher
+  attend: https://conferences.oreilly.com/velocity/vl-ny/public/schedule/detail/70147
+
 - date: 2018-09-17
   country: fr
   city: Paris
@@ -13,47 +13,47 @@ exclude:
 - self-paced

 chapters:
-- common/title.md
+- shared/title.md
 - logistics.md
-- intro/intro.md
-- common/about-slides.md
-- common/toc.md
-- - intro/Docker_Overview.md
-  - intro/Docker_History.md
-  - intro/Training_Environment.md
-  - intro/Installing_Docker.md
-  - intro/First_Containers.md
-  - intro/Background_Containers.md
-  - intro/Start_And_Attach.md
-- - intro/Initial_Images.md
-  - intro/Building_Images_Interactively.md
-  - intro/Building_Images_With_Dockerfiles.md
-  - intro/Cmd_And_Entrypoint.md
-  - intro/Copying_Files_During_Build.md
-- - intro/Multi_Stage_Builds.md
-  - intro/Publishing_To_Docker_Hub.md
-  - intro/Dockerfile_Tips.md
-- - intro/Naming_And_Inspecting.md
-  - intro/Labels.md
-  - intro/Getting_Inside.md
-- - intro/Container_Networking_Basics.md
-  - intro/Network_Drivers.md
-  - intro/Container_Network_Model.md
-  #- intro/Connecting_Containers_With_Links.md
-  - intro/Ambassadors.md
-- - intro/Local_Development_Workflow.md
-  - intro/Working_With_Volumes.md
-  - intro/Compose_For_Dev_Stacks.md
-  - intro/Docker_Machine.md
-- - intro/Advanced_Dockerfiles.md
-  - intro/Application_Configuration.md
-  - intro/Logging.md
-  - intro/Resource_Limits.md
-- - intro/Namespaces_Cgroups.md
-  - intro/Copy_On_Write.md
-  #- intro/Containers_From_Scratch.md
-- - intro/Container_Engines.md
-  - intro/Ecosystem.md
-  - intro/Orchestration_Overview.md
-- common/thankyou.md
-- intro/links.md
+- containers/intro.md
+- shared/about-slides.md
+- shared/toc.md
+- - containers/Docker_Overview.md
+  - containers/Docker_History.md
+  - containers/Training_Environment.md
+  - containers/Installing_Docker.md
+  - containers/First_Containers.md
+  - containers/Background_Containers.md
+  - containers/Start_And_Attach.md
+- - containers/Initial_Images.md
+  - containers/Building_Images_Interactively.md
+  - containers/Building_Images_With_Dockerfiles.md
+  - containers/Cmd_And_Entrypoint.md
+  - containers/Copying_Files_During_Build.md
+- - containers/Multi_Stage_Builds.md
+  - containers/Publishing_To_Docker_Hub.md
+  - containers/Dockerfile_Tips.md
+- - containers/Naming_And_Inspecting.md
+  - containers/Labels.md
+  - containers/Getting_Inside.md
+- - containers/Container_Networking_Basics.md
+  - containers/Network_Drivers.md
+  - containers/Container_Network_Model.md
+  #- containers/Connecting_Containers_With_Links.md
+  - containers/Ambassadors.md
+- - containers/Local_Development_Workflow.md
+  - containers/Working_With_Volumes.md
+  - containers/Compose_For_Dev_Stacks.md
+  - containers/Docker_Machine.md
+- - containers/Advanced_Dockerfiles.md
+  - containers/Application_Configuration.md
+  - containers/Logging.md
+  - containers/Resource_Limits.md
+- - containers/Namespaces_Cgroups.md
+  - containers/Copy_On_Write.md
+  #- containers/Containers_From_Scratch.md
+- - containers/Container_Engines.md
+  - containers/Ecosystem.md
+  - containers/Orchestration_Overview.md
+- shared/thankyou.md
+- containers/links.md
@@ -13,47 +13,47 @@ exclude:
|
||||
- in-person
|
||||
|
||||
chapters:
|
||||
- common/title.md
|
||||
# - common/logistics.md
|
||||
- intro/intro.md
|
||||
- common/about-slides.md
|
||||
- common/toc.md
|
||||
- - intro/Docker_Overview.md
|
||||
- intro/Docker_History.md
|
||||
- intro/Training_Environment.md
|
||||
- intro/Installing_Docker.md
|
||||
- intro/First_Containers.md
|
||||
- intro/Background_Containers.md
|
||||
- intro/Start_And_Attach.md
|
||||
- - intro/Initial_Images.md
|
||||
- intro/Building_Images_Interactively.md
|
||||
- intro/Building_Images_With_Dockerfiles.md
|
||||
- intro/Cmd_And_Entrypoint.md
|
||||
- intro/Copying_Files_During_Build.md
|
||||
- - intro/Multi_Stage_Builds.md
|
||||
- intro/Publishing_To_Docker_Hub.md
|
||||
- intro/Dockerfile_Tips.md
|
||||
- - intro/Naming_And_Inspecting.md
|
||||
- intro/Labels.md
|
||||
- intro/Getting_Inside.md
|
||||
- - intro/Container_Networking_Basics.md
|
||||
- intro/Network_Drivers.md
|
||||
- intro/Container_Network_Model.md
|
||||
#- intro/Connecting_Containers_With_Links.md
|
||||
- intro/Ambassadors.md
|
||||
- - intro/Local_Development_Workflow.md
|
||||
- intro/Working_With_Volumes.md
|
||||
- intro/Compose_For_Dev_Stacks.md
|
||||
- intro/Docker_Machine.md
|
||||
- - intro/Advanced_Dockerfiles.md
|
||||
- intro/Application_Configuration.md
|
||||
- intro/Logging.md
|
||||
- intro/Resource_Limits.md
|
||||
- - intro/Namespaces_Cgroups.md
|
||||
- intro/Copy_On_Write.md
|
||||
#- intro/Containers_From_Scratch.md
|
||||
- - intro/Container_Engines.md
|
||||
- intro/Ecosystem.md
|
||||
- intro/Orchestration_Overview.md
|
||||
- common/thankyou.md
|
||||
- intro/links.md
|
||||
- shared/title.md
|
||||
# - shared/logistics.md
|
||||
- containers/intro.md
|
||||
- shared/about-slides.md
|
||||
- shared/toc.md
|
||||
- - containers/Docker_Overview.md
|
||||
- containers/Docker_History.md
|
||||
- containers/Training_Environment.md
|
||||
- containers/Installing_Docker.md
|
||||
- containers/First_Containers.md
|
||||
- containers/Background_Containers.md
|
||||
- containers/Start_And_Attach.md
|
||||
- - containers/Initial_Images.md
|
||||
- containers/Building_Images_Interactively.md
|
||||
- containers/Building_Images_With_Dockerfiles.md
|
||||
- containers/Cmd_And_Entrypoint.md
|
||||
- containers/Copying_Files_During_Build.md
|
||||
- - containers/Multi_Stage_Builds.md
|
||||
- containers/Publishing_To_Docker_Hub.md
|
||||
- containers/Dockerfile_Tips.md
|
||||
- - containers/Naming_And_Inspecting.md
|
||||
- containers/Labels.md
|
||||
- containers/Getting_Inside.md
|
||||
- - containers/Container_Networking_Basics.md
|
||||
- containers/Network_Drivers.md
|
||||
- containers/Container_Network_Model.md
|
||||
#- containers/Connecting_Containers_With_Links.md
|
||||
- containers/Ambassadors.md
|
||||
- - containers/Local_Development_Workflow.md
|
||||
- containers/Working_With_Volumes.md
|
||||
- containers/Compose_For_Dev_Stacks.md
|
||||
- containers/Docker_Machine.md
|
||||
- - containers/Advanced_Dockerfiles.md
|
||||
- containers/Application_Configuration.md
|
||||
- containers/Logging.md
|
||||
- containers/Resource_Limits.md
|
||||
- - containers/Namespaces_Cgroups.md
|
||||
- containers/Copy_On_Write.md
|
||||
#- containers/Containers_From_Scratch.md
|
||||
- - containers/Container_Engines.md
|
||||
- containers/Ecosystem.md
|
||||
- containers/Orchestration_Overview.md
|
||||
- shared/thankyou.md
|
||||
- containers/links.md
|
||||
|
||||
slides/k8s/accessinternal.md (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
# Accessing internal services
|
||||
|
||||
- When we are logged in on a cluster node, we can access internal services
|
||||
|
||||
(by virtue of the Kubernetes network model: all nodes can reach all pods and services)
|
||||
|
||||
- When we are accessing a remote cluster, things are different
|
||||
|
||||
(generally, our local machine won't have access to the cluster's internal subnet)
|
||||
|
||||
- How can we temporarily access a service without exposing it to everyone?
|
||||
|
||||
--
|
||||
|
||||
- `kubectl proxy`: gives us access to the API, which includes a proxy for HTTP resources
|
||||
|
||||
- `kubectl port-forward`: allows forwarding of TCP ports to arbitrary pods, services, ...
|
||||
|
||||
---
|
||||
|
||||
## Suspension of disbelief
|
||||
|
||||
The exercises in this section assume that we have set up `kubectl` on our
|
||||
local machine in order to access a remote cluster.
|
||||
|
||||
We will therefore show how to access services and pods of the remote cluster,
|
||||
from our local machine.
|
||||
|
||||
You can also run these exercises directly on the cluster (if you haven't
|
||||
installed and set up `kubectl` locally).
|
||||
|
||||
Running the commands directly on the cluster will be less useful
|
||||
(since you could access services and pods directly),
|
||||
but keep in mind that these commands will work anywhere as long as you have
|
||||
installed and set up `kubectl` to communicate with your cluster.
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` in theory
|
||||
|
||||
- Running `kubectl proxy` gives us access to the entire Kubernetes API
|
||||
|
||||
- The API includes routes to proxy HTTP traffic
|
||||
|
||||
- These routes look like the following:
|
||||
|
||||
`/api/v1/namespaces/<namespace>/services/<service>/proxy`
|
||||
|
||||
- We just add the URI to the end of the request, for instance:
|
||||
|
||||
`/api/v1/namespaces/<namespace>/services/<service>/proxy/index.html`
|
||||
|
||||
- We can access `services` and `pods` this way
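
(For pods, the analogous route is `/api/v1/namespaces/<namespace>/pods/<pod-name>/proxy/...`)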
|
||||
|
||||
---
|
||||
|
||||
## `kubectl proxy` in practice
|
||||
|
||||
- Let's access the `webui` service through `kubectl proxy`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Run an API proxy in the background:
|
||||
```bash
|
||||
kubectl proxy &
|
||||
```
|
||||
|
||||
- Access the `webui` service:
|
||||
```bash
|
||||
curl localhost:8001/api/v1/namespaces/default/services/webui/proxy/index.html
|
||||
```
|
||||
|
||||
- Terminate the proxy:
|
||||
```bash
|
||||
kill %1
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## `kubectl port-forward` in theory
|
||||
|
||||
- What if we want to access a TCP service?
|
||||
|
||||
- We can use `kubectl port-forward` instead
|
||||
|
||||
- It will create a TCP relay to forward connections to a specific port
|
||||
|
||||
(of a pod, service, deployment...)
|
||||
|
||||
- The syntax is:
|
||||
|
||||
`kubectl port-forward service/name_of_service local_port:remote_port`
|
||||
|
||||
- If only one port number is specified, it is used for both local and remote ports
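
- Forwarding to a single pod works the same way, e.g. (a sketch with a hypothetical pod name):

  `kubectl port-forward pod/name-of-pod 8080:80`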
|
||||
|
||||
---
|
||||
|
||||
## `kubectl port-forward` in practice
|
||||
|
||||
- Let's access our remote Redis server
|
||||
|
||||
.exercise[
|
||||
|
||||
- Forward connections from local port 10000 to remote port 6379:
|
||||
```bash
|
||||
kubectl port-forward svc/redis 10000:6379 &
|
||||
```
|
||||
|
||||
- Connect to the Redis server:
|
||||
```bash
|
||||
telnet localhost 10000
|
||||
```
|
||||
|
||||
- Issue a few commands, e.g. `INFO server` then `QUIT`
|
||||
|
||||
<!--
|
||||
```wait Connected to localhost```
|
||||
```keys INFO server```
|
||||
```keys ^J```
|
||||
```keys QUIT```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
- Terminate the port forwarder:
|
||||
```bash
|
||||
kill %1
|
||||
```
|
||||
|
||||
]
|
||||
slides/k8s/authn-authz.md (new file, 533 lines)
@@ -0,0 +1,533 @@
|
||||
# Authentication and authorization
|
||||
|
||||
*And first, a little refresher!*
|
||||
|
||||
- Authentication = verifying the identity of a person
|
||||
|
||||
On a UNIX system, we can authenticate with login+password, SSH keys ...
|
||||
|
||||
- Authorization = listing what they are allowed to do
|
||||
|
||||
On a UNIX system, this can include file permissions, sudoer entries ...
|
||||
|
||||
- Sometimes abbreviated as "authn" and "authz"
|
||||
|
||||
- In good modular systems, these things are decoupled
|
||||
|
||||
(so we can e.g. change a password or SSH key without having to reset access rights)
|
||||
|
||||
---
|
||||
|
||||
## Authentication in Kubernetes
|
||||
|
||||
- When the API server receives a request, it tries to authenticate it
|
||||
|
||||
(it examines headers, certificates ... anything available)
|
||||
|
||||
- Many authentication methods can be used simultaneously:
|
||||
|
||||
- TLS client certificates (that's what we've been doing with `kubectl` so far)
|
||||
|
||||
- bearer tokens (a secret token in the HTTP headers of the request)
|
||||
|
||||
- [HTTP basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication) (carrying user and password in a HTTP header)
|
||||
|
||||
- authentication proxy (sitting in front of the API and setting trusted headers)
|
||||
|
||||
- It's the job of the authentication method to produce:
|
||||
|
||||
- the user name
|
||||
- the user ID
|
||||
- a list of groups
|
||||
|
||||
- The API server doesn't interpret these; it'll be the job of *authorizers*
|
||||
|
||||
---
|
||||
|
||||
## Anonymous requests
|
||||
|
||||
- If any authentication method *rejects* a request, it's denied
|
||||
|
||||
(`401 Unauthorized` HTTP code)
|
||||
|
||||
- If a request is neither rejected nor accepted by anyone, it's anonymous
|
||||
|
||||
- the user name is `system:anonymous`
|
||||
|
||||
- the list of groups is `[system:unauthenticated]`
|
||||
|
||||
- By default, the anonymous user can't do anything
|
||||
|
||||
(that's what you get if you just `curl` the Kubernetes API)
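
For instance (a sketch; the API server address and the exact error wording depend on the cluster):

```bash
# No credentials at all; -k skips TLS verification
curl -k https://<address-of-api-server>:6443/api/v1/pods
```

The `403 Forbidden` response should mention the user `system:anonymous`.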
|
||||
|
||||
---
|
||||
|
||||
## Authentication with TLS certificates
|
||||
|
||||
- This is enabled in most Kubernetes deployments
|
||||
|
||||
- The user name is derived from the `CN` field in the client certificate
|
||||
|
||||
- The groups are derived from the `O` fields in the client certificate
|
||||
|
||||
- From the point of view of the Kubernetes API, users do not exist
|
||||
|
||||
(i.e. they are not stored in etcd or anywhere else)
|
||||
|
||||
- Users can be created (and given membership to groups) independently of the API
|
||||
|
||||
- The Kubernetes API can be set up to use your custom CA to validate client certs
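
Creating such a certificate could look like this (a sketch; the CA file paths are typical of kubeadm clusters and will vary elsewhere):

```bash
# The CN will become the user name; the O will become a group
openssl genrsa -out alice.key 2048
openssl req -new -key alice.key -subj "/CN=alice/O=developers" -out alice.csr
openssl x509 -req -in alice.csr \
        -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key \
        -CAcreateserial -out alice.crt -days 365
```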
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Viewing our admin certificate
|
||||
|
||||
- Let's inspect the certificate we've been using all this time!
|
||||
|
||||
.exercise[
|
||||
|
||||
- This command will show the `CN` and `O` fields for our certificate:
|
||||
```bash
|
||||
kubectl config view \
|
||||
--raw \
|
||||
-o json \
|
||||
| jq -r .users[0].user[\"client-certificate-data\"] \
|
||||
| base64 -d \
|
||||
| openssl x509 -text \
|
||||
| grep Subject:
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Let's break down that command together! 😅
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Breaking down the command
|
||||
|
||||
- `kubectl config view` shows the Kubernetes user configuration
|
||||
- `--raw` includes certificate information (which shows as REDACTED otherwise)
|
||||
- `-o json` outputs the information in JSON format
|
||||
- `| jq ...` extracts the field with the user certificate (in base64)
|
||||
- `| base64 -d` decodes the base64 format (now we have a PEM file)
|
||||
- `| openssl x509 -text` parses the certificate and outputs it as plain text
|
||||
- `| grep Subject:` shows us the line that interests us
|
||||
|
||||
→ We are user `kubernetes-admin`, in group `system:masters`.
|
||||
|
||||
---
|
||||
|
||||
## Authentication with tokens
|
||||
|
||||
- Tokens are passed as HTTP headers:
|
||||
|
||||
`Authorization: Bearer and-then-here-comes-the-token`
|
||||
|
||||
- Tokens can be validated through a number of different methods:
|
||||
|
||||
- static tokens hard-coded in a file on the API server
|
||||
|
||||
- [bootstrap tokens](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) (special case to create a cluster or join nodes)
|
||||
|
||||
- [OpenID Connect tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens) (to delegate authentication to compatible OAuth2 providers)
|
||||
|
||||
- service accounts (these deserve more details, coming right up!)
|
||||
|
||||
---
|
||||
|
||||
## Service accounts
|
||||
|
||||
- A service account is a user that exists in the Kubernetes API
|
||||
|
||||
(it is visible with e.g. `kubectl get serviceaccounts`)
|
||||
|
||||
- Service accounts can therefore be created / updated dynamically
|
||||
|
||||
(they don't require hand-editing a file and restarting the API server)
|
||||
|
||||
- A service account is associated with a set of secrets
|
||||
|
||||
(the kind that you can view with `kubectl get secrets`)
|
||||
|
||||
- Service accounts are generally used to grant permissions to applications, services ...
|
||||
|
||||
(as opposed to humans)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Token authentication in practice
|
||||
|
||||
- We are going to list existing service accounts
|
||||
|
||||
- Then we will extract the token for a given service account
|
||||
|
||||
- And we will use that token to authenticate with the API
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Listing service accounts
|
||||
|
||||
.exercise[
|
||||
|
||||
- The resource name is `serviceaccount` or `sa` in short:
|
||||
```bash
|
||||
kubectl get sa
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
There should be just one service account in the default namespace: `default`.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Finding the secret
|
||||
|
||||
.exercise[
|
||||
|
||||
- List the secrets for the `default` service account:
|
||||
```bash
|
||||
kubectl get sa default -o yaml
|
||||
SECRET=$(kubectl get sa default -o json | jq -r .secrets[0].name)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It should be named `default-token-XXXXX`.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extracting the token
|
||||
|
||||
- The token is stored in the secret, wrapped with base64 encoding
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the secret:
|
||||
```bash
|
||||
kubectl get secret $SECRET -o yaml
|
||||
```
|
||||
|
||||
- Extract the token and decode it:
|
||||
```bash
|
||||
TOKEN=$(kubectl get secret $SECRET -o json \
|
||||
| jq -r .data.token | base64 -d)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using the token
|
||||
|
||||
- Let's send a request to the API, without and with the token
|
||||
|
||||
.exercise[
|
||||
|
||||
- Find the ClusterIP for the `kubernetes` service:
|
||||
```bash
|
||||
kubectl get svc kubernetes
|
||||
API=$(kubectl get svc kubernetes -o json | jq -r .spec.clusterIP)
|
||||
```
|
||||
|
||||
- Connect without the token:
|
||||
```bash
|
||||
curl -k https://$API
|
||||
```
|
||||
|
||||
- Connect with the token:
|
||||
```bash
|
||||
curl -k -H "Authorization: Bearer $TOKEN" https://$API
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Results
|
||||
|
||||
- In both cases, we will get a "Forbidden" error
|
||||
|
||||
- Without authentication, the user is `system:anonymous`
|
||||
|
||||
- With authentication, it is shown as `system:serviceaccount:default:default`
|
||||
|
||||
- The API "sees" us as a different user
|
||||
|
||||
- But neither user has any right, so we can't do nothin'
|
||||
|
||||
- Let's change that!
|
||||
|
||||
---
|
||||
|
||||
## Authorization in Kubernetes
|
||||
|
||||
- There are multiple ways to grant permissions in Kubernetes, called [authorizers](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#authorization-modules):
|
||||
|
||||
- [Node Authorization](https://kubernetes.io/docs/reference/access-authn-authz/node/) (used internally by kubelet; we can ignore it)
|
||||
|
||||
- [Attribute-based access control](https://kubernetes.io/docs/reference/access-authn-authz/abac/) (powerful but complex and static; ignore it too)
|
||||
|
||||
- [Webhook](https://kubernetes.io/docs/reference/access-authn-authz/webhook/) (each API request is submitted to an external service for approval)
|
||||
|
||||
- [Role-based access control](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) (associates permissions to users dynamically)
|
||||
|
||||
- The one we want is the last one, generally abbreviated as RBAC
|
||||
|
||||
---
|
||||
|
||||
## Role-based access control
|
||||
|
||||
- RBAC allows us to specify fine-grained permissions
|
||||
|
||||
- Permissions are expressed as *rules*
|
||||
|
||||
- A rule is a combination of:
|
||||
|
||||
- [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) like create, get, list, update, delete ...
|
||||
|
||||
- resources (as in "API resource", like pods, nodes, services ...)
|
||||
|
||||
- resource names (to specify e.g. one specific pod instead of all pods)
|
||||
|
||||
- in some cases, [subresources](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-resources) (e.g. logs are subresources of pods)
|
||||
|
||||
---
|
||||
|
||||
## From rules to roles to rolebindings
|
||||
|
||||
- A *role* is an API object containing a list of *rules*
|
||||
|
||||
Example: role "external-load-balancer-configurator" can:
|
||||
- [list, get] resources [endpoints, services, pods]
|
||||
- [update] resources [services]
|
||||
|
||||
- A *rolebinding* associates a role with a user
|
||||
|
||||
Example: rolebinding "external-load-balancer-configurator":
|
||||
- associates user "external-load-balancer-configurator"
|
||||
- with role "external-load-balancer-configurator"
|
||||
|
||||
- Yes, there can be users, roles, and rolebindings with the same name
|
||||
|
||||
- It's a good idea for 1-1-1 bindings; not so much for 1-N ones
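
In YAML, that example could look like this (a sketch; only the names and rules from the example above are filled in):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: external-load-balancer-configurator
rules:
- apiGroups: [""]
  resources: ["endpoints", "services", "pods"]
  verbs: ["list", "get"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: external-load-balancer-configurator
subjects:
- kind: User
  name: external-load-balancer-configurator
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: external-load-balancer-configurator
  apiGroup: rbac.authorization.k8s.io
```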
|
||||
|
||||
---
|
||||
|
||||
## Cluster-scope permissions
|
||||
|
||||
- API resources Role and RoleBinding are for objects within a namespace
|
||||
|
||||
- We can also define API resources ClusterRole and ClusterRoleBinding
|
||||
|
||||
- These are a superset, allowing us to:
|
||||
|
||||
- specify actions on cluster-wide objects (like nodes)
|
||||
|
||||
- operate across all namespaces
|
||||
|
||||
- We can create Role and RoleBinding resources within a namespace
|
||||
|
||||
- ClusterRole and ClusterRoleBinding resources are global
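
For example, this would grant read-only access to everything, across all namespaces (a sketch; the user name is hypothetical):

```bash
kubectl create clusterrolebinding alice-can-view \
        --clusterrole=view --user=alice
```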
|
||||
|
||||
---
|
||||
|
||||
## Pods and service accounts
|
||||
|
||||
- A pod can be associated with a service account
|
||||
|
||||
- by default, it is associated with the `default` service account
|
||||
|
||||
- as we've seen earlier, this service account has no permissions anyway
|
||||
|
||||
- The associated token is exposed into the pod's filesystem
|
||||
|
||||
(in `/var/run/secrets/kubernetes.io/serviceaccount/token`)
|
||||
|
||||
- Standard Kubernetes tooling (like `kubectl`) will look for it there
|
||||
|
||||
- So Kubernetes tools running in a pod will automatically use the service account
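
We can check this from inside any pod (a quick sketch):

```bash
# The token, the namespace, and the cluster CA certificate are all mounted there
ls /var/run/secrets/kubernetes.io/serviceaccount/
cat /var/run/secrets/kubernetes.io/serviceaccount/token
```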
|
||||
|
||||
---
|
||||
|
||||
## In practice
|
||||
|
||||
- We are going to create a service account
|
||||
|
||||
- We will use an existing cluster role (`view`)
|
||||
|
||||
- We will bind together this role and this service account
|
||||
|
||||
- Then we will run a pod using that service account
|
||||
|
||||
- In this pod, we will install `kubectl` and check our permissions
|
||||
|
||||
---
|
||||
|
||||
## Creating a service account
|
||||
|
||||
- We will call the new service account `viewer`
|
||||
|
||||
(note that nothing prevents us from calling it `view`, like the role)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the new service account:
|
||||
```bash
|
||||
kubectl create serviceaccount viewer
|
||||
```
|
||||
|
||||
- List service accounts now:
|
||||
```bash
|
||||
kubectl get serviceaccounts
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Binding a role to the service account
|
||||
|
||||
- Binding a role = creating a *rolebinding* object
|
||||
|
||||
- We will call that object `viewercanview`
|
||||
|
||||
(but again, we could call it `view`)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the new role binding:
|
||||
```bash
|
||||
kubectl create rolebinding viewercanview \
|
||||
--clusterrole=view \
|
||||
--serviceaccount=default:viewer
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It's important to note a couple of details in these flags ...
|
||||
|
||||
---
|
||||
|
||||
## Roles vs Cluster Roles
|
||||
|
||||
- We used `--clusterrole=view`
|
||||
|
||||
- What would have happened if we had used `--role=view`?
|
||||
|
||||
- we would have bound the role `view` from the local namespace
|
||||
<br/>(instead of the cluster role `view`)
|
||||
|
||||
- the command would have worked fine (no error)
|
||||
|
||||
- but later, our API requests would have been denied
|
||||
|
||||
- This is a deliberate design decision
|
||||
|
||||
(we can reference roles that don't exist, and create/update them later)
|
||||
|
||||
---
|
||||
|
||||
## Users vs Service Accounts
|
||||
|
||||
- We used `--serviceaccount=default:viewer`
|
||||
|
||||
- What would have happened if we had used `--user=default:viewer`?
|
||||
|
||||
- we would have bound the role to a user instead of a service account
|
||||
|
||||
- again, the command would have worked fine (no error)
|
||||
|
||||
- ... but our API requests would have been denied later
|
||||
|
||||
- What about the `default:` prefix?
|
||||
|
||||
- that's the namespace of the service account
|
||||
|
||||
- yes, it could be inferred from context, but ... `kubectl` requires it
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
- We will run an `alpine` pod and install `kubectl` there
|
||||
|
||||
.exercise[
|
||||
|
||||
- Run a one-time pod:
|
||||
```bash
|
||||
kubectl run eyepod --rm -ti --restart=Never \
|
||||
--serviceaccount=viewer \
|
||||
--image alpine
|
||||
```
|
||||
|
||||
- Install `curl`, then use it to install `kubectl`:
|
||||
```bash
|
||||
apk add --no-cache curl
|
||||
URLBASE=https://storage.googleapis.com/kubernetes-release/release
|
||||
KUBEVER=$(curl -s $URLBASE/stable.txt)
|
||||
curl -LO $URLBASE/$KUBEVER/bin/linux/amd64/kubectl
|
||||
chmod +x kubectl
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Running `kubectl` in the pod
|
||||
|
||||
- We'll try to use our `view` permissions, then to create an object
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that we can, indeed, view things:
|
||||
```bash
|
||||
./kubectl get all
|
||||
```
|
||||
|
||||
- But that we can't create things:
|
||||
```
|
||||
./kubectl run tryme --image=nginx
|
||||
```
|
||||
|
||||
- Exit the container with `exit` or `^D`
|
||||
|
||||
<!-- ```keys ^D``` -->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing directly with `kubectl`
|
||||
|
||||
- We can also check for permission with `kubectl auth can-i`:
|
||||
```bash
|
||||
kubectl auth can-i list nodes
|
||||
kubectl auth can-i create pods
|
||||
kubectl auth can-i get pod/name-of-pod
|
||||
kubectl auth can-i get /url-fragment-of-api-request/
|
||||
kubectl auth can-i '*' services
|
||||
```
|
||||
|
||||
- And we can check permissions on behalf of other users:
|
||||
```bash
|
||||
kubectl auth can-i list nodes \
|
||||
--as some-user
|
||||
kubectl auth can-i list nodes \
|
||||
--as system:serviceaccount:<namespace>:<name-of-service-account>
|
||||
```
|
||||
slides/k8s/build-with-docker.md (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
# Building images with the Docker Engine
|
||||
|
||||
- Until now, we have built our images manually, directly on a node
|
||||
|
||||
- We are going to show how to build images from within the cluster
|
||||
|
||||
(by executing code in a container controlled by Kubernetes)
|
||||
|
||||
- We are going to use the Docker Engine for that purpose
|
||||
|
||||
- To access the Docker Engine, we will mount the Docker socket in our container
|
||||
|
||||
- After building the image, we will push it to our self-hosted registry
|
||||
|
||||
---
|
||||
|
||||
## Resource specification for our builder pod
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: build-image
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: docker-build
|
||||
image: docker
|
||||
env:
|
||||
- name: REGISTRY_PORT
|
||||
value: "`3XXXX`"
|
||||
command: ["sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
apk add --no-cache git &&
|
||||
mkdir /workspace &&
|
||||
git clone https://github.com/jpetazzo/container.training /workspace &&
|
||||
docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
|
||||
docker push localhost:$REGISTRY_PORT/worker
|
||||
volumeMounts:
|
||||
- name: docker-socket
|
||||
mountPath: /var/run/docker.sock
|
||||
volumes:
|
||||
- name: docker-socket
|
||||
hostPath:
|
||||
path: /var/run/docker.sock
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Breaking down the pod specification (1/2)
|
||||
|
||||
- `restartPolicy: OnFailure` prevents the build from running in an infinite loop
|
||||
|
||||
- We use the `docker` image (so that the `docker` CLI is available)
|
||||
|
||||
- We rely on the fact that the `docker` image is based on `alpine`
|
||||
|
||||
(which is why we use `apk` to install `git`)
|
||||
|
||||
- The port for the registry is passed through an environment variable
|
||||
|
||||
(this avoids repeating it in the specification, which would be error-prone)
|
||||
|
||||
.warning[The environment variable has to be a string, so the `"`s are mandatory!]
|
||||
|
||||
---
|
||||
|
||||
## Breaking down the pod specification (2/2)
|
||||
|
||||
- The volume `docker-socket` is declared with a `hostPath`, indicating a bind-mount
|
||||
|
||||
- It is then mounted in the container onto the default Docker socket path
|
||||
|
||||
- We show an interesting way to specify the commands to run in the container:
|
||||
|
||||
- the command executed will be `sh -c <args>`
|
||||
|
||||
- `args` is a list of strings
|
||||
|
||||
- `|` is used to pass a multi-line string in the YAML file
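
In isolation, the pattern looks like this (a generic sketch):

```yaml
command: ["sh", "-c"]
args:
- |
  echo hello &&
  echo world
```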
|
||||
|
||||
---
|
||||
|
||||
## Running our pod
|
||||
|
||||
- Let's try this out!
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check the port used by our self-hosted registry:
|
||||
```bash
|
||||
kubectl get svc registry
|
||||
```
|
||||
|
||||
- Edit `~/container.training/k8s/docker-build.yaml` to put the port number
|
||||
|
||||
- Schedule the pod by applying the resource file:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/docker-build.yaml
|
||||
```
|
||||
|
||||
- Watch the logs:
|
||||
```bash
|
||||
stern build-image
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait latest: digest: sha256:```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## What's missing?
|
||||
|
||||
What do we need to change to make this production-ready?
|
||||
|
||||
- Build from a long-running container (e.g. a `Deployment`) triggered by web hooks
|
||||
|
||||
(the payload of the web hook could indicate the repository to build)
|
||||
|
||||
- Build a specific branch or tag; tag image accordingly
|
||||
|
||||
- Handle repositories where the Dockerfile is not at the root
|
||||
|
||||
(or containing multiple Dockerfiles)
|
||||
|
||||
- Expose build logs so that troubleshooting is straightforward
|
||||
|
||||
--
|
||||
|
||||
🤔 That seems like a lot of work!
|
||||
|
||||
--
|
||||
|
||||
That's why services like Docker Hub (with [automated builds](https://docs.docker.com/docker-hub/builds/)) are helpful.
|
||||
<br/>
|
||||
They handle the whole "code repository → Docker image" workflow.
|
||||
|
||||
---
|
||||
|
||||
## Things to be aware of
|
||||
|
||||
- This technique talks directly to a node's Docker Engine to build images
|
||||
|
||||
- It bypasses resource allocation mechanisms used by Kubernetes
|
||||
|
||||
(but you can use *taints* and *tolerations* to dedicate builder nodes)
|
||||
|
||||
- Be careful not to introduce conflicts when naming images
|
||||
|
||||
(e.g. do not allow the user to specify the image names!)
|
||||
|
||||
- Your builds are going to be *fast*
|
||||
|
||||
(because they will leverage Docker's caching system)
|
||||
slides/k8s/build-with-kaniko.md (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
# Building images with Kaniko
|
||||
|
||||
- [Kaniko](https://github.com/GoogleContainerTools/kaniko) is an open source tool to build container images within Kubernetes
|
||||
|
||||
- It can build an image using any standard Dockerfile
|
||||
|
||||
- The resulting image can be pushed to a registry or exported as a tarball
|
||||
|
||||
- It doesn't require any particular privilege
|
||||
|
||||
(and can therefore run in a regular container in a regular pod)
|
||||
|
||||
- This combination of features is pretty unique
|
||||
|
||||
(most other tools use different formats, or require elevated privileges)
|
||||
|
||||
---
|
||||
|
||||
## Kaniko in practice
|
||||
|
||||
- Kaniko provides an "executor image", `gcr.io/kaniko-project/executor`
|
||||
|
||||
- When running that image, we need to specify at least:
|
||||
|
||||
- the path to the build context (=the directory with our Dockerfile)
|
||||
|
||||
- the target image name (including the registry address)
|
||||
|
||||
- Simplified example:
|
||||
```
|
||||
docker run \
|
||||
-v ...:/workspace gcr.io/kaniko-project/executor \
|
||||
--context=/workspace \
|
||||
--destination=registry:5000/image_name:image_tag
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Running Kaniko in a Docker container
|
||||
|
||||
- Let's build the image for the DockerCoins `worker` service with Kaniko
|
||||
|
||||
.exercise[
|
||||
|
||||
- Find the port number for our self-hosted registry:
|
||||
```bash
|
||||
kubectl get svc registry
|
||||
PORT=$(kubectl get svc registry -o json | jq .spec.ports[0].nodePort)
|
||||
```
|
||||
|
||||
- Run Kaniko:
|
||||
```bash
|
||||
docker run --net host \
|
||||
-v ~/container.training/dockercoins/worker:/workspace \
|
||||
gcr.io/kaniko-project/executor \
|
||||
--context=/workspace \
|
||||
--destination=127.0.0.1:$PORT/worker-kaniko:latest
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We use `--net host` so that we can connect to the registry over `127.0.0.1`.
|
||||
|
||||
---
|
||||
|
||||
## Running Kaniko in a Kubernetes pod
|
||||
|
||||
- We need to mount or copy the build context to the pod
|
||||
|
||||
- We are going to build straight from the git repository
|
||||
|
||||
(to avoid depending on files sitting on a node, outside of containers)
|
||||
|
||||
- We need to `git clone` the repository before running Kaniko
|
||||
|
||||
- We are going to use two containers sharing a volume:
|
||||
|
||||
- a first container to `git clone` the repository to the volume
|
||||
|
||||
- a second container to run Kaniko, using the content of the volume
|
||||
|
||||
- However, we need the first container to be done before running the second one
|
||||
|
||||
🤔 How could we do that?
|
||||
|
||||
---
|
||||
|
||||
## [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) to the rescue
|
||||
|
||||
- A pod can have a list of `initContainers`
|
||||
|
||||
- `initContainers` are executed in the specified order
|
||||
|
||||
- Each Init Container needs to complete (exit) successfully
|
||||
|
||||
- If any Init Container fails (non-zero exit status) the pod fails
|
||||
|
||||
(what happens next depends on the pod's `restartPolicy`)
|
||||
|
||||
- After all Init Containers have run successfully, normal `containers` are started
|
||||
|
||||
- We are going to execute the `git clone` operation in an Init Container
|
||||
|
||||
---
|
||||
|
||||
## Our Kaniko builder pod
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kaniko-build
|
||||
spec:
|
||||
initContainers:
|
||||
- name: git-clone
|
||||
image: alpine
|
||||
command: ["sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
apk add --no-cache git &&
|
||||
git clone git://github.com/jpetazzo/container.training /workspace
|
||||
volumeMounts:
|
||||
- name: workspace
|
||||
mountPath: /workspace
|
||||
containers:
|
||||
- name: build-image
|
||||
image: gcr.io/kaniko-project/executor:latest
|
||||
args:
|
||||
- "--context=/workspace/dockercoins/rng"
|
||||
- "--insecure"
|
||||
- "--destination=registry:5000/rng-kaniko:latest"
|
||||
volumeMounts:
|
||||
- name: workspace
|
||||
mountPath: /workspace
|
||||
volumes:
|
||||
- name: workspace
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Explanations
|
||||
|
||||
- We define a volume named `workspace` (using the default `emptyDir` provider)
|
||||
|
||||
- That volume is mounted to `/workspace` in both our containers
|
||||
|
||||
- The `git-clone` Init Container installs `git` and runs `git clone`
|
||||
|
||||
- The `build-image` container executes Kaniko
|
||||
|
||||
- We use our self-hosted registry DNS name (`registry`)
|
||||
|
||||
- We add `--insecure` to use plain HTTP to talk to the registry
|
||||
|
||||
---
|
||||
|
||||
## Running our Kaniko builder pod
|
||||
|
||||
- The YAML for the pod is in `k8s/kaniko-build.yaml`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the pod:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/kaniko-build.yaml
|
||||
```
|
||||
|
||||
- Watch the logs:
|
||||
```bash
|
||||
stern kaniko
|
||||
```
|
||||
|
||||
<!--
|
||||
```longwait registry:5000/rng-kaniko:latest:```
|
||||
```keys ^C```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Discussion
|
||||
|
||||
*What should we use? The Docker build technique shown earlier? Kaniko? Something else?*
|
||||
|
||||
- The Docker build technique is simple, and has the potential to be very fast
|
||||
|
||||
- However, it doesn't play nice with Kubernetes resource limits
|
||||
|
||||
- Kaniko plays nice with resource limits
|
||||
|
||||
- However, it's slower (there is no caching at all)
|
||||
|
||||
- The ultimate building tool will probably be [Jessica Frazelle](https://twitter.com/jessfraz)'s [img](https://github.com/genuinetools/img) builder
|
||||
|
||||
(it depends on upstream changes that are not in Kubernetes 1.11.2 yet)
|
||||
|
||||
But ... is it all about [speed](https://github.com/AkihiroSuda/buildbench/issues/1)? (No!)
|
||||
|
||||
---
|
||||
|
||||
## The big picture
|
||||
|
||||
- For starters: the [Docker Hub automated builds](https://docs.docker.com/docker-hub/builds/) are very easy to set up
|
||||
|
||||
- link a GitHub repository with the Docker Hub
|
||||
|
||||
- each time you push to GitHub, an image gets built on the Docker Hub
|
||||
|
||||
- If this doesn't work for you: why?
|
||||
|
||||
- too slow (I'm far from `us-east-1`!) → consider using your cloud provider's registry
|
||||
|
||||
- I'm not using a cloud provider → ok, perhaps you need to self-host then
|
||||
|
||||
- I need fancy features (e.g. CI) → consider something like GitLab
|
||||
@@ -161,7 +161,7 @@ class: pic
|
||||
|
||||
(This is illustrated on the first "super complicated" schema)
|
||||
|
||||
- In some hosted Kubernetes offerings (e.g. GKE), the control plane is invisible
|
||||
- In some hosted Kubernetes offerings (e.g. AKS, GKE, EKS), the control plane is invisible
|
||||
|
||||
(We only "see" a Kubernetes API endpoint)
|
||||
|
||||
slides/k8s/configuration.md (new file, 542 lines)
@@ -0,0 +1,542 @@
|
||||
# Managing configuration
|
||||
|
||||
- Some applications need to be configured (obviously!)
|
||||
|
||||
- There are many ways for our code to pick up configuration:
|
||||
|
||||
- command-line arguments
|
||||
|
||||
- environment variables
|
||||
|
||||
- configuration files
|
||||
|
||||
- configuration servers (getting configuration from a database, an API...)
|
||||
|
||||
- ... and more (because programmers can be very creative!)
|
||||
|
||||
- How can we do these things with containers and Kubernetes?
|
||||
|
||||
---
|
||||
|
||||
## Passing configuration to containers
|
||||
|
||||
- There are many ways to pass configuration to code running in a container:
|
||||
|
||||
- baking it in a custom image
|
||||
|
||||
- command-line arguments
|
||||
|
||||
- environment variables
|
||||
|
||||
- injecting configuration files
|
||||
|
||||
- exposing it over the Kubernetes API
|
||||
|
||||
- configuration servers
|
||||
|
||||
- Let's review these different strategies!
|
||||
|
||||
---
|
||||
|
||||
## Baking custom images
|
||||
|
||||
- Put the configuration in the image
|
||||
|
||||
(it can be in a configuration file, but also `ENV` or `CMD` actions)
|
||||
|
||||
- It's easy! It's simple!
|
||||
|
||||
- Unfortunately, it also has downsides:
|
||||
|
||||
- multiplication of images
|
||||
|
||||
- different images for dev, staging, prod ...
|
||||
|
||||
- minor reconfigurations require a whole build/push/pull cycle
|
||||
|
||||
- Avoid doing it unless you don't have the time to figure out other options
|
||||
|
||||
---
|
||||
|
||||
## Command-line arguments
|
||||
|
||||
- Pass options to the `args` array in the container specification
|
||||
|
||||
- Example ([source](https://github.com/coreos/pods/blob/master/kubernetes.yaml#L29)):
|
||||
```yaml
|
||||
args:
|
||||
- "--data-dir=/var/lib/etcd"
|
||||
- "--advertise-client-urls=http://127.0.0.1:2379"
|
||||
- "--listen-client-urls=http://127.0.0.1:2379"
|
||||
- "--listen-peer-urls=http://127.0.0.1:2380"
|
||||
- "--name=etcd"
|
||||
```
|
||||
|
||||
- The options can be passed directly to the program that we run ...
|
||||
|
||||
... or to a wrapper script that will use them to e.g. generate a config file
|
||||
|
||||
---
|
||||
|
||||
## Command-line arguments, pros & cons
|
||||
|
||||
- Works great when options are passed directly to the running program
|
||||
|
||||
(otherwise, a wrapper script can work around the issue)
|
||||
|
||||
- Works great when there aren't too many parameters
|
||||
|
||||
(to avoid a 20-lines `args` array)
|
||||
|
||||
- Requires documentation and/or understanding of the underlying program
|
||||
|
||||
("which parameters and flags do I need, again?")
|
||||
|
||||
- Well-suited for mandatory parameters (without default values)
|
||||
|
||||
- Not ideal when we need to pass a real configuration file anyway
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
|
||||
- Pass options through the `env` map in the container specification
|
||||
|
||||
- Example:
|
||||
```yaml
|
||||
env:
|
||||
- name: ADMIN_PORT
|
||||
value: "8080"
|
||||
- name: ADMIN_AUTH
|
||||
value: Basic
|
||||
- name: ADMIN_CRED
|
||||
value: "admin:0pensesame!"
|
||||
```
|
||||
|
||||
.warning[`value` must be a string! Make sure that numbers and fancy strings are quoted.]
|
||||
|
||||
🤔 Why this weird `{name: xxx, value: yyy}` scheme? It will be revealed soon!
|
||||
|
||||
---
|
||||
|
||||
## The downward API
|
||||
|
||||
- In the previous example, environment variables have fixed values
|
||||
|
||||
- We can also use a mechanism called the *downward API*
|
||||
|
||||
- The downward API allows us to expose pod or container information
|
||||
|
||||
- either through special files (we won't show that for now)
|
||||
|
||||
- or through environment variables
|
||||
|
||||
- The value of these environment variables is computed when the container is started
|
||||
|
||||
- Remember: environment variables won't (can't) change after container start
|
||||
|
||||
- Let's see a few concrete examples!
|
||||
|
||||
---
|
||||
|
||||
## Exposing the pod's namespace
|
||||
|
||||
```yaml
|
||||
- name: MY_POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
```
|
||||
|
||||
- Useful to generate FQDN of services
|
||||
|
||||
(in some contexts, a short name is not enough)
|
||||
|
||||
- For instance, the two commands should be equivalent:
|
||||
```
|
||||
curl api-backend
|
||||
curl api-backend.$MY_POD_NAMESPACE.svc.cluster.local
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Exposing the pod's IP address
|
||||
|
||||
```yaml
|
||||
- name: MY_POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
```
|
||||
|
||||
- Useful if we need to know our IP address
|
||||
|
||||
(we could also read it from `eth0`, but this is more solid)
|
||||
|
||||
---
|
||||
|
||||
## Exposing the container's resource limits
|
||||
|
||||
```yaml
|
||||
- name: MY_MEM_LIMIT
|
||||
valueFrom:
|
||||
resourceFieldRef:
|
||||
containerName: test-container
|
||||
resource: limits.memory
|
||||
```
|
||||
|
||||
- Useful for runtimes where memory is garbage collected
|
||||
|
||||
- Example: the JVM
|
||||
|
||||
(the memory available to the JVM should be set with the `-Xmx` flag)
|
||||
|
||||
- Best practice: set a memory limit, and pass it to the runtime
|
||||
|
||||
(see [this blog post](https://very-serio.us/2017/12/05/running-jvms-in-kubernetes/) for a detailed example)
|
||||
|
||||
---
|
||||
|
||||
## More about the downward API
|
||||
|
||||
- [This documentation page](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) tells more about these environment variables
|
||||
|
||||
- And [this one](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/) explains the other way to use the downward API
|
||||
|
||||
(through files that get created in the container filesystem)
|
||||
|
||||
---
|
||||
|
||||
## Environment variables, pros and cons
|
||||
|
||||
- Works great when the running program expects these variables
|
||||
|
||||
- Works great for optional parameters with reasonable defaults
|
||||
|
||||
(since the container image can provide these defaults)
|
||||
|
||||
- Sort of auto-documented
|
||||
|
||||
(we can see which environment variables are defined in the image, and their values)
|
||||
|
||||
- Can be (ab)used with longer values ...
|
||||
|
||||
- ... You *can* put an entire Tomcat configuration file in an environment variable ...
|
||||
|
||||
- ... But *should* you?
|
||||
|
||||
(Do it if you really need to, we're not judging! But we'll see better ways.)
|
||||
|
||||
---
|
||||
|
||||
## Injecting configuration files
|
||||
|
||||
- Sometimes, there is no way around it: we need to inject a full config file
|
||||
|
||||
- Kubernetes provides a mechanism for that purpose: `configmaps`
|
||||
|
||||
- A configmap is a Kubernetes resource that exists in a namespace
|
||||
|
||||
- Conceptually, it's a key/value map
|
||||
|
||||
(values are arbitrary strings)
|
||||
|
||||
- We can think about them in (at least) two different ways:
|
||||
|
||||
- as holding entire configuration file(s)
|
||||
|
||||
- as holding individual configuration parameters
|
||||
|
||||
*Note: to hold sensitive information, we can use "Secrets", which
|
||||
are another type of resource behaving very much like configmaps.
|
||||
We'll cover them just after!*
|
||||
|
||||
---
|
||||
|
||||
## Configmaps storing entire files
|
||||
|
||||
- In this case, each key/value pair corresponds to a configuration file
|
||||
|
||||
- Key = name of the file
|
||||
|
||||
- Value = content of the file
|
||||
|
||||
- There can be one key/value pair, or as many as necessary
|
||||
|
||||
(for complex apps with multiple configuration files)
|
||||
|
||||
- Examples:
|
||||
```
|
||||
# Create a configmap with a single key, "app.conf"
|
||||
kubectl create configmap my-app-config --from-file=app.conf
|
||||
# Create a configmap with a single key, "app.conf" but another file
|
||||
kubectl create configmap my-app-config --from-file=app.conf=app-prod.conf
|
||||
# Create a configmap with multiple keys (one per file in the config.d directory)
|
||||
kubectl create configmap my-app-config --from-file=config.d/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configmaps storing individual parameters
|
||||
|
||||
- In this case, each key/value pair corresponds to a parameter
|
||||
|
||||
- Key = name of the parameter
|
||||
|
||||
- Value = value of the parameter
|
||||
|
||||
- Examples:
|
||||
```
|
||||
# Create a configmap with two keys
|
||||
kubectl create cm my-app-config \
|
||||
--from-literal=foreground=red \
|
||||
--from-literal=background=blue
|
||||
|
||||
# Create a configmap from a file containing key=val pairs
|
||||
kubectl create cm my-app-config \
|
||||
--from-env-file=app.conf
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Exposing configmaps to containers
|
||||
|
||||
- Configmaps can be exposed as plain files in the filesystem of a container
|
||||
|
||||
- this is achieved by declaring a volume and mounting it in the container
|
||||
|
||||
- this is particularly effective for configmaps containing whole files
|
||||
|
||||
- Configmaps can be exposed as environment variables in the container
|
||||
|
||||
- this is achieved with the downward API
|
||||
|
||||
- this is particularly effective for configmaps containing individual parameters
|
||||
|
||||
- Let's see how to do both!
|
||||
|
||||
---
|
||||
|
||||
## Passing a configuration file with a configmap
|
||||
|
||||
- We will start a load balancer powered by HAProxy
|
||||
|
||||
- We will use the [official `haproxy` image](https://hub.docker.com/_/haproxy/)
|
||||
|
||||
- It expects to find its configuration in `/usr/local/etc/haproxy/haproxy.cfg`
|
||||
|
||||
- We will provide a simple HAProxy configuration, `k8s/haproxy.cfg`
|
||||
|
||||
- It listens on port 80, and load balances connections between Google and Bing
|
||||
|
||||
---
|
||||
|
||||
## Creating the configmap
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `k8s` directory in the repository:
|
||||
```bash
|
||||
cd ~/container.training/k8s
|
||||
```
|
||||
|
||||
- Create a configmap named `haproxy` and holding the configuration file:
|
||||
```bash
|
||||
kubectl create configmap haproxy --from-file=haproxy.cfg
|
||||
```
|
||||
|
||||
- Check what our configmap looks like:
|
||||
```bash
|
||||
kubectl get configmap haproxy -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the configmap
|
||||
|
||||
We are going to use the following pod definition:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: haproxy
|
||||
spec:
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: haproxy
|
||||
containers:
|
||||
- name: haproxy
|
||||
image: haproxy
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /usr/local/etc/haproxy/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using the configmap
|
||||
|
||||
- The resource definition from the previous slide is in `k8s/haproxy.yaml`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the HAProxy pod:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/haproxy.yaml
|
||||
```
|
||||
|
||||
<!-- ```hide kubectl wait pod haproxy --for condition=ready``` -->
|
||||
|
||||
- Check the IP address allocated to the pod:
|
||||
```bash
|
||||
kubectl get pod haproxy -o wide
|
||||
IP=$(kubectl get pod haproxy -o json | jq -r .status.podIP)
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our load balancer
|
||||
|
||||
- The load balancer will send:
|
||||
|
||||
- half of the connections to Google
|
||||
|
||||
- the other half to Bing
|
||||
|
||||
.exercise[
|
||||
|
||||
- Access the load balancer a few times:
|
||||
```bash
|
||||
curl -I $IP
|
||||
curl -I $IP
|
||||
curl -I $IP
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We should see connections served by Google (look for the `Location` header) and others served by Bing (indicated by the `X-MSEdge-Ref` header).
|
||||
|
||||
---
|
||||
|
||||
## Exposing configmaps with the downward API
|
||||
|
||||
- We are going to run a Docker registry on a custom port
|
||||
|
||||
- By default, the registry listens on port 5000
|
||||
|
||||
- This can be changed by setting environment variable `REGISTRY_HTTP_ADDR`
|
||||
|
||||
- We are going to store the port number in a configmap
|
||||
|
||||
- Then we will expose that configmap to a container environment variable
|
||||
|
||||
---
|
||||
|
||||
## Creating the configmap
|
||||
|
||||
.exercise[
|
||||
|
||||
- Our configmap will have a single key, `http.addr`:
|
||||
```bash
|
||||
kubectl create configmap registry --from-literal=http.addr=0.0.0.0:80
|
||||
```
|
||||
|
||||
- Check our configmap:
|
||||
```bash
|
||||
kubectl get configmap registry -o yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Using the configmap
|
||||
|
||||
We are going to use the following pod definition:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: registry
|
||||
spec:
|
||||
containers:
|
||||
- name: registry
|
||||
image: registry
|
||||
env:
|
||||
- name: REGISTRY_HTTP_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: registry
|
||||
key: http.addr
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using the configmap
|
||||
|
||||
- The resource definition from the previous slide is in `k8s/registry.yaml`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the registry pod:
|
||||
```bash
|
||||
kubectl apply -f ~/container.training/k8s/registry.yaml
|
||||
```
|
||||
|
||||
<!-- ```hide kubectl wait pod registry --for condition=ready``` -->
|
||||
|
||||
- Check the IP address allocated to the pod:
|
||||
```bash
|
||||
kubectl get pod registry -o wide
|
||||
IP=$(kubectl get pod registry -o json | jq -r .status.podIP)
|
||||
```
|
||||
|
||||
- Confirm that the registry is available on port 80:
|
||||
```bash
|
||||
curl $IP/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Passwords, tokens, sensitive information
|
||||
|
||||
- For sensitive information, there is another special resource: *Secrets*
|
||||
|
||||
- Secrets and Configmaps work almost the same way
|
||||
|
||||
(we'll expose the differences on the next slide)
|
||||
|
||||
- The *intent* is different, though:
|
||||
|
||||
*"You should use secrets for things which are actually secret like API keys,
|
||||
credentials, etc., and use config map for not-secret configuration data."*
|
||||
|
||||
*"In the future there will likely be some differentiators for secrets like rotation or support for backing the secret API w/ HSMs, etc."*
|
||||
|
||||
(Source: [the author of both features](https://stackoverflow.com/a/36925553/580281))
|
||||
|
||||
---
|
||||
|
||||
## Differences between configmaps and secrets
|
||||
|
||||
- Secrets are base64-encoded when shown with `kubectl get secrets -o yaml`
|
||||
|
||||
- keep in mind that this is just *encoding*, not *encryption*
|
||||
|
||||
- it is very easy to [automatically extract and decode secrets](https://medium.com/@mveritym/decoding-kubernetes-secrets-60deed7a96a3)
|
||||
|
||||
- [Secrets can be encrypted at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/)
|
||||
|
||||
- With RBAC, we can authorize a user to access configmaps, but not secrets
|
||||
|
||||
(since they are two different kinds of resources)
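
Decoding a secret by hand is a one-liner (a sketch; the secret name and key are hypothetical):

```bash
kubectl get secret mysecret -o json | jq -r .data.password | base64 -d
```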
|
||||
@@ -95,10 +95,21 @@ Note: `--export` will remove "cluster-specific" information, i.e.:
|
||||
|
||||
- Change `kind: Deployment` to `kind: DaemonSet`
|
||||
|
||||
<!--
|
||||
```bash vim rng.yml```
|
||||
```wait kind: Deployment```
|
||||
```keys /Deployment```
|
||||
```keys ^J```
|
||||
```keys cwDaemonSet```
|
||||
```keys ^[``` ]
|
||||
```keys :wq```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
- Save, quit
|
||||
|
||||
- Try to create our new resource:
|
||||
```bash
|
||||
|
||||
kubectl apply -f rng.yml
|
||||
```
|
||||
|
||||
@@ -130,6 +141,7 @@ We all knew this couldn't be that easy, right!
|
||||
|
||||
- remove the `replicas` field
|
||||
- remove the `strategy` field (which defines the rollout mechanism for a deployment)
|
||||
- remove the `progressDeadlineSeconds` field (also used by the rollout mechanism)
|
||||
- remove the `status: {}` line at the end
|
||||
|
||||
--
|
||||
@@ -419,11 +431,35 @@ Of course, option 2 offers more learning opportunities. Right?
|
||||
kubectl edit daemonset rng
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Please edit the object below```
|
||||
```keys /run: rng```
|
||||
```keys ^J```
|
||||
```keys noisactive: "yes"```
|
||||
```keys ^[``` ]
|
||||
```keys /run: rng```
|
||||
```keys ^J```
|
||||
```keys oisactive: "yes"```
|
||||
```keys ^[``` ]
|
||||
```keys :wq```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
- Update the service to add `isactive: "yes"` to its selector:
|
||||
```bash
|
||||
kubectl edit service rng
|
||||
```
|
||||
|
||||
<!--
|
||||
```wait Please edit the object below```
|
||||
```keys /run: rng```
|
||||
```keys ^J```
|
||||
```keys noisactive: "yes"```
|
||||
```keys ^[``` ]
|
||||
```keys :wq```
|
||||
```keys ^J```
|
||||
-->
|
||||
|
||||
]

---

@@ -32,15 +32,11 @@ There is an additional step to make the dashboard available from outside (we'll

- Create all the dashboard resources, with the following command:
  ```bash
  kubectl apply -f https://goo.gl/Qamqab
  kubectl apply -f ~/container.training/k8s/kubernetes-dashboard.yaml
  ```

]

The goo.gl URL expands to:
<br/>
.small[https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml]

---

@@ -72,15 +68,11 @@ The goo.gl URL expands to:

- Apply the convenient YAML file, and defeat SSL protection:
  ```bash
  kubectl apply -f https://goo.gl/tA7GLz
  kubectl apply -f ~/container.training/k8s/socat.yaml
  ```

]

The goo.gl URL expands to:
<br/>
.small[.small[https://gist.githubusercontent.com/jpetazzo/c53a28b5b7fdae88bc3c5f0945552c04/raw/da13ef1bdd38cc0e90b7a4074be8d6a0215e1a65/socat.yaml]]

.warning[All our dashboard traffic is now clear-text, including passwords!]

---

@@ -103,7 +95,7 @@ You'll want the `3xxxx` port.

- Connect to http://oneofournodes:3xxxx/

<!-- ```open https://node1:3xxxx/``` -->
<!-- ```open http://node1:3xxxx/``` -->

]

@@ -135,7 +127,7 @@ The dashboard will then ask you which authentication you want to use.

- Grant admin privileges to the dashboard so we can see our resources:
  ```bash
  kubectl apply -f https://goo.gl/CHsLTA
  kubectl apply -f ~/container.training/k8s/grant-admin-to-dashboard.yaml
  ```

- Reload the dashboard and enjoy!

@@ -161,7 +153,7 @@ The dashboard will then ask you which authentication you want to use.

.exercise[

- Edit the service:
  ```bash
  ```
  kubectl edit service kubernetes-dashboard
  ```

@@ -175,7 +167,7 @@ The dashboard will then ask you which authentication you want to use.

## Editing the `kubernetes-dashboard` service

- If we look at the [YAML](https://goo.gl/Qamqab) that we loaded before, we'll get a hint
- If we look at the [YAML](https://github.com/jpetazzo/container.training/blob/master/k8s/kubernetes-dashboard.yaml) that we loaded before, we'll get a hint

--

@@ -192,6 +184,16 @@ The dashboard will then ask you which authentication you want to use.

- Change `ClusterIP` to `NodePort`, save, and exit

<!--
```wait Please edit the object below```
```keys /ClusterIP```
```keys ^J```
```keys cwNodePort```
```keys ^[```
```keys :wq```
```keys ^J```
-->

- Check the port that was assigned with `kubectl -n kube-system get services`

- Connect to https://oneofournodes:3xxxx/ (yes, https)
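
For reference, the same change can be made non-interactively (a sketch; with this install method, the service lives in the `kube-system` namespace):

```bash
kubectl -n kube-system patch service kubernetes-dashboard \
  -p '{"spec": {"type": "NodePort"}}'
```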

239 slides/k8s/gitworkflows.md Normal file
@@ -0,0 +1,239 @@
# Git-based workflows

- Deploying with `kubectl` has downsides:

  - we don't know *who* deployed *what* and *when*

  - there is no audit trail (except the API server logs)

  - there is no easy way to undo most operations

  - there is no review/approval process (like for code reviews)

- We have all these things for *code*, though

- Can we manage cluster state like we manage our source code?

---

## Reminder: Kubernetes is *declarative*

- All we do is create/change resources

- These resources have a perfect YAML representation

- All we do is manipulate these YAML representations

  (`kubectl run` generates a YAML file that gets applied; see the sketch after this list)

- We can store these YAML representations in a code repository

- We can version that code repository and maintain it with best practices

  - define which branch(es) can go to qa/staging/production

  - control who can push to which branches

  - have formal review processes, pull requests ...
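
If you want to see what such a YAML representation looks like without creating anything, `kubectl` can generate one for you. This is a minimal sketch (the `web` name and `nginx` image are just placeholders; on recent versions of `kubectl`, the flag is spelled `--dry-run=client`):

```bash
# Generate the YAML representation of a deployment, without creating it
kubectl run web --image=nginx --dry-run -o yaml > web.yaml

# The file can now be reviewed, versioned, and applied like any other manifest
kubectl apply -f web.yaml
```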

---

## Enabling git-based workflows

- There are a few tools out there to help us do that

- We'll see demos of two of them: [Flux] and [Gitkube]

- There are *many* other tools, some of them with even more features

- There are also *many* integrations with popular CI/CD systems

  (e.g.: GitLab, Jenkins, ...)

[Flux]: https://www.weave.works/oss/flux/
[Gitkube]: https://gitkube.sh/

---

## Flux overview

- We put our Kubernetes resources as YAML files in a git repository

- Flux polls that repository regularly (every 5 minutes by default)

- The resources described by the YAML files are created/updated automatically

- Changes are made by updating the code in the repository

---

## Preparing a repository for Flux

- We need a repository with Kubernetes YAML files

- I have one: https://github.com/jpetazzo/kubercoins

- Fork it to your GitHub account

- Create a new branch in your fork; e.g. `prod`

  (e.g. by adding a line in the README through the GitHub web UI)

- This is the branch that we are going to use for deployment

---

## Setting up Flux

- Clone the Flux repository:
  ```
  git clone https://github.com/weaveworks/flux
  ```

- Edit `deploy/flux-deployment.yaml`

- Change the `--git-url` and `--git-branch` parameters:
  ```yaml
  - --git-url=git@github.com:your-git-username/kubercoins
  - --git-branch=prod
  ```

- Apply all the YAML:
  ```
  kubectl apply -f deploy/
  ```

---

## Allowing Flux to access the repository

- When it starts, Flux generates an SSH key

- Display that key:
  ```
  kubectl logs deployment/flux | grep identity
  ```

- Then add that key to the repository, giving it **write** access

  (some Flux features require write access)

- After a minute or so, DockerCoins will be deployed to the current namespace

---

## Making changes

- Make changes (on the `prod` branch), e.g. change `replicas` in `worker`

- After a few minutes, the changes will be picked up by Flux and applied
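
For instance, a change could be sketched out like this from a shell (assuming `YOUR-GIT-USERNAME` points at the fork created earlier, and that the worker manifest is a file named `worker.yaml` containing a `replicas: 1` line; check the actual repository layout first):

```bash
git clone -b prod git@github.com:YOUR-GIT-USERNAME/kubercoins
cd kubercoins
# Bump the number of workers (sketch; adjust to the actual manifest)
sed -i "s/replicas: 1/replicas: 2/" worker.yaml
git commit -am "Scale worker to 2 replicas"
git push origin prod
```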

---

## Other features

- Flux can keep a list of all the tags of all the images we're running

- The `fluxctl` tool can show us if we're running the latest images

- We can also "automate" a resource (i.e. automatically deploy new images)

- And much more!

---

## Gitkube overview

- We put our Kubernetes resources as YAML files in a git repository

- Gitkube is a git server (or "git remote")

- After making changes to the repository, we push to Gitkube

- Gitkube applies the resources to the cluster

---

## Setting up Gitkube

- Install the CLI:
  ```
  sudo curl -L -o /usr/local/bin/gitkube \
      https://github.com/hasura/gitkube/releases/download/v0.2.1/gitkube_linux_amd64
  sudo chmod +x /usr/local/bin/gitkube
  ```

- Install Gitkube on the cluster:
  ```
  gitkube install --expose ClusterIP
  ```

---

## Creating a Remote

- Gitkube provides a new type of API resource: *Remote*

  (this is using a mechanism called Custom Resource Definitions or CRD)

- Create and apply a YAML file containing the following manifest:
  ```yaml
  apiVersion: gitkube.sh/v1alpha1
  kind: Remote
  metadata:
    name: example
  spec:
    authorizedKeys:
    - `ssh-rsa AAA...`
    manifests:
      path: "."
  ```

  (replace the `ssh-rsa AAA...` section with the content of `~/.ssh/id_rsa.pub`)
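
To avoid copy-pasting the key by hand, the manifest can also be generated and applied in one go; here is a hedged sketch using a shell heredoc:

```bash
kubectl apply -f - <<EOF
apiVersion: gitkube.sh/v1alpha1
kind: Remote
metadata:
  name: example
spec:
  authorizedKeys:
  - "$(cat ~/.ssh/id_rsa.pub)"  # expanded by the shell before kubectl sees it
  manifests:
    path: "."
EOF
```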

---

## Pushing to our remote

- Get the `gitkubed` IP address:
  ```
  kubectl -n kube-system get svc gitkubed
  IP=$(kubectl -n kube-system get svc gitkubed -o json |
       jq -r .spec.clusterIP)
  ```

- Get ourselves a sample repository with resource YAML files:
  ```
  git clone git://github.com/jpetazzo/kubercoins
  cd kubercoins
  ```

- Add the remote and push to it:
  ```
  git remote add k8s ssh://default-example@$IP/~/git/default-example
  git push k8s master
  ```

---

## Making changes

- Edit a local file

- Commit

- Push!

- Make sure that you push to the `k8s` remote

---

## Other features

- Gitkube can also build container images for us

  (see the [documentation](https://github.com/hasura/gitkube/blob/master/docs/remote.md) for more details)

- Gitkube can also deploy Helm Charts

  (instead of raw YAML files)

178 slides/k8s/healthchecks.md Normal file
@@ -0,0 +1,178 @@
# Healthchecks

- Kubernetes provides two kinds of healthchecks: liveness and readiness

- Healthchecks are *probes* that apply to *containers* (not to pods)

- Each container can have two (optional) probes:

  - liveness = is this container dead or alive?

  - readiness = is this container ready to serve traffic?

- Different probes are available (HTTP, TCP, program execution)

- Let's see the difference and how to use them!

---

## Liveness probe

- Indicates if the container is dead or alive

- A dead container cannot come back to life

- If the liveness probe fails, the container is killed

  (to make really sure that it's really dead; no zombies or undeads!)

- What happens next depends on the pod's `restartPolicy`:

  - `Never`: the container is not restarted

  - `OnFailure` or `Always`: the container is restarted

---

## When to use a liveness probe

- To indicate failures that can't be recovered

  - deadlocks (causing all requests to time out)

  - internal corruption (causing all requests to error)

- If the liveness probe fails *N* consecutive times, the container is killed

- *N* is the `failureThreshold` (3 by default)

---

## Readiness probe

- Indicates if the container is ready to serve traffic

- If a container becomes "unready" (let's say busy!) it might be ready again soon

- If the readiness probe fails:

  - the container is *not* killed

  - if the pod is a member of a service, it is temporarily removed

  - it is re-added as soon as the readiness probe passes again

---

## When to use a readiness probe

- To indicate temporary failures

  - the application can only service *N* parallel connections

  - the runtime is busy doing garbage collection or initial data load

- The container is marked as "not ready" after `failureThreshold` failed attempts

  (3 by default)

- It is marked again as "ready" after `successThreshold` successful attempts

  (1 by default)

---

## Different types of probes

- HTTP request

  - specify URL of the request (and optional headers)

  - any status code between 200 and 399 indicates success

- TCP connection

  - the probe succeeds if the TCP port is open

- arbitrary exec

  - a command is executed in the container

  - exit status of zero indicates success
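
The examples later in this section show HTTP and exec probes; for completeness, here is a sketch of a TCP probe, used as a *readiness* probe this time (the pod name and port are arbitrary placeholders):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: redis-with-readiness
spec:
  containers:
  - name: redis
    image: redis
    readinessProbe:
      tcpSocket:
        port: 6379   # succeeds as soon as Redis accepts TCP connections
      periodSeconds: 5
EOF
```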

---

## Benefits of using probes

- Rolling updates proceed when containers are *actually ready*

  (as opposed to merely started)

- Containers in a broken state get killed and restarted

  (instead of serving errors or timeouts)

- Overloaded backends get removed from load balancer rotation

  (thus improving response times across the board)

---

## Example: HTTP probe

Here is a pod template for the `rng` web service of the DockerCoins app:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: rng-with-liveness
spec:
  containers:
  - name: rng
    image: dockercoins/rng:v0.1
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 10
      periodSeconds: 1
```

If the backend serves an error, or takes longer than 1s, 3 times in a row, it gets killed.

---

## Example: exec probe

Here is a pod template for a Redis server:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis-with-liveness
spec:
  containers:
  - name: redis
    image: redis
    livenessProbe:
      exec:
        command: ["redis-cli", "ping"]
```

If the Redis process becomes unresponsive, it will be killed.

---

## Details about liveness and readiness probes

- Probes are executed at intervals of `periodSeconds` (default: 10)

- The timeout for a probe is set with `timeoutSeconds` (default: 1)

- A probe is considered successful after `successThreshold` successes (default: 1)

- A probe is considered failing after `failureThreshold` failures (default: 3)

- If a probe is not defined, it's as if there was an "always successful" probe
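
Putting these parameters together, here is a sketch of a readiness probe with all four knobs spelled out (the pod itself is a placeholder):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: probe-tuning-demo
spec:
  containers:
  - name: web
    image: nginx
    readinessProbe:
      httpGet:
        path: /
        port: 80
      periodSeconds: 10    # probe every 10 seconds
      timeoutSeconds: 1    # each probe must answer within 1 second
      successThreshold: 1  # 1 success flips the container back to "ready"
      failureThreshold: 3  # 3 consecutive failures mark it "not ready"
EOF
```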

@@ -34,27 +34,47 @@

## Installing Helm

- We need to install the `helm` CLI; then use it to deploy `tiller`
- If the `helm` CLI is not installed in your environment, install it

.exercise[

- Install the `helm` CLI:
- Check if `helm` is installed:
  ```bash
  helm
  ```

- If it's not installed, run the following command:
  ```bash
  curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
  ```

- Deploy `tiller`:
]

---

## Installing Tiller

- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace

- They can be managed (installed, upgraded...) with the `helm` CLI

.exercise[

- Deploy Tiller:
  ```bash
  helm init
  ```

- Add the `helm` completion:
  ```bash
  . <(helm completion $(basename $SHELL))
  ```

]

If Tiller was already installed, don't worry: this won't break it.

At the end of the install process, you will see:

```
Happy Helming!
```

---

## Fix account permissions

@@ -204,7 +224,7 @@ The chart's metadata includes an URL to the project's home page.

.exercise[

- Let's install our helm chart! (`dockercoins` is the path to the chart)
  ```bash
  ```
  helm install dockercoins
  ```
]

524 slides/k8s/ingress.md Normal file
@@ -0,0 +1,524 @@
# Exposing HTTP services with Ingress resources

- *Services* give us a way to access a pod or a set of pods

- Services can be exposed to the outside world:

  - with type `NodePort` (on a port >30000)

  - with type `LoadBalancer` (allocating an external load balancer)

- What about HTTP services?

  - how can we expose `webui`, `rng`, `hasher`?

  - the Kubernetes dashboard?

  - a new version of `webui`?

---

## Exposing HTTP services

- If we use `NodePort` services, clients have to specify port numbers

  (i.e. http://xxxxx:31234 instead of just http://xxxxx)

- `LoadBalancer` services are nice, but:

  - they are not available in all environments

  - they often carry an additional cost (e.g. they provision an ELB)

  - they require one extra step for DNS integration
    <br/>
    (waiting for the `LoadBalancer` to be provisioned; then adding it to DNS)

- We could build our own reverse proxy

---

## Building a custom reverse proxy

- There are many options available:

  Apache, HAProxy, Hipache, NGINX, Traefik, ...

  (look at [jpetazzo/aiguillage](https://github.com/jpetazzo/aiguillage) for a minimal reverse proxy configuration using NGINX)

- Most of these options require us to update/edit configuration files after each change

- Some of them can pick up virtual hosts and backends from a configuration store

- Wouldn't it be nice if this configuration could be managed with the Kubernetes API?

--

- Enter.red[¹] *Ingress* resources!

.footnote[.red[¹] Pun maybe intended.]

---

## Ingress resources

- Kubernetes API resource (`kubectl get ingress`/`ingresses`/`ing`)

- Designed to expose HTTP services

- Basic features:

  - load balancing
  - SSL termination
  - name-based virtual hosting

- Can also route to different services depending on:

  - URI path (e.g. `/api`→`api-service`, `/static`→`assets-service`)
  - Client headers, including cookies (for A/B testing, canary deployment...)
  - and more!

---

## Principle of operation

- Step 1: deploy an *ingress controller*

  - ingress controller = load balancer + control loop

  - the control loop watches over ingress resources, and configures the LB accordingly

- Step 2: setup DNS

  - associate DNS entries with the load balancer address

- Step 3: create *ingress resources*

  - the ingress controller picks up these resources and configures the LB

- Step 4: profit!

---

## Ingress in action

- We will deploy the Traefik ingress controller

  - this is an arbitrary choice

  - maybe motivated by the fact that Traefik releases are named after cheeses

- For DNS, we will use [nip.io](http://nip.io/)

  - `*.1.2.3.4.nip.io` resolves to `1.2.3.4`

- We will create ingress resources for various HTTP services

---

## Deploying pods listening on port 80

- We want our ingress load balancer to be available on port 80

- We could do that with a `LoadBalancer` service

  ... but it requires support from the underlying infrastructure

- We could use pods specifying `hostPort: 80`

  ... but with most CNI plugins, this [doesn't work or requires additional setup](https://github.com/kubernetes/kubernetes/issues/23920)

- We could use a `NodePort` service

  ... but that requires [changing the `--service-node-port-range` flag in the API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)

- Last resort: the `hostNetwork` mode

---

## Without `hostNetwork`

- Normally, each pod gets its own *network namespace*

  (sometimes called sandbox or network sandbox)

- An IP address is associated to the pod

- This IP address is routed/connected to the cluster network

- All containers of that pod are sharing that network namespace

  (and therefore using the same IP address)

---

## With `hostNetwork: true`

- No network namespace gets created

- The pod is using the network namespace of the host

- It "sees" (and can use) the interfaces (and IP addresses) of the host

- The pod can receive outside traffic directly, on any port

- Downside: with most network plugins, network policies won't work for that pod

  - most network policies work at the IP address level

  - filtering that pod = filtering traffic from the node
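
To see the difference first-hand, here is a sketch that runs a one-off pod in the host's network namespace and lists the interfaces it sees (the `hostnet` name is a placeholder):

```bash
kubectl run hostnet --restart=Never --image=alpine \
  --overrides='{"apiVersion": "v1", "spec": {"hostNetwork": true}}' \
  -- ip addr

# Give the pod a moment to complete, then check its output:
kubectl logs hostnet   # should list the host's interfaces, not a pod sandbox
```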

---

## Running Traefik

- The [Traefik documentation](https://docs.traefik.io/user-guide/kubernetes/#deploy-trfik-using-a-deployment-or-daemonset) tells us to pick between Deployment and Daemon Set

- We are going to use a Daemon Set so that each node can accept connections

- We will do two minor changes to the [YAML provided by Traefik](https://github.com/containous/traefik/blob/master/examples/k8s/traefik-ds.yaml):

  - enable `hostNetwork`

  - add a *toleration* so that Traefik also runs on `node1`

---

## Taints and tolerations

- A *taint* is an attribute added to a node

- It prevents pods from running on the node

- ... Unless they have a matching *toleration*

- When deploying with `kubeadm`:

  - a taint is placed on the node dedicated to the control plane

  - the pods running the control plane have a matching toleration

---

class: extra-details

## Checking taints on our nodes

.exercise[

- Check our nodes specs:
  ```bash
  kubectl get node node1 -o json | jq .spec
  kubectl get node node2 -o json | jq .spec
  ```

]

We should see a result only for `node1` (the one with the control plane):

```json
"taints": [
  {
    "effect": "NoSchedule",
    "key": "node-role.kubernetes.io/master"
  }
]
```

---

class: extra-details

## Understanding a taint

- The `key` can be interpreted as:

  - a reservation for a special set of pods
    <br/>
    (here, this means "this node is reserved for the control plane")

  - an error condition on the node
    <br/>
    (for instance: "disk full", do not start new pods here!)

- The `effect` can be:

  - `NoSchedule` (don't run new pods here)

  - `PreferNoSchedule` (try not to run new pods here)

  - `NoExecute` (don't run new pods and evict running pods)

---

class: extra-details

## Checking tolerations on the control plane

.exercise[

- Check tolerations for CoreDNS:
  ```bash
  kubectl -n kube-system get deployments coredns -o json |
    jq .spec.template.spec.tolerations
  ```

]

The result should include:

```json
{
  "effect": "NoSchedule",
  "key": "node-role.kubernetes.io/master"
}
```

It means: "bypass the exact taint that we saw earlier on `node1`."

---

class: extra-details

## Special tolerations

.exercise[

- Check tolerations on `kube-proxy`:
  ```bash
  kubectl -n kube-system get ds kube-proxy -o json |
    jq .spec.template.spec.tolerations
  ```

]

The result should include:

```json
{
  "operator": "Exists"
}
```

This one is a special case that means "ignore all taints and run anyway."

---

## Running Traefik on our cluster

- We provide a YAML file (`k8s/traefik.yaml`) which is essentially the sum of:

  - [Traefik's Daemon Set resources](https://github.com/containous/traefik/blob/master/examples/k8s/traefik-ds.yaml) (patched with `hostNetwork` and tolerations)

  - [Traefik's RBAC rules](https://github.com/containous/traefik/blob/master/examples/k8s/traefik-rbac.yaml) allowing it to watch necessary API objects

.exercise[

- Apply the YAML:
  ```bash
  kubectl apply -f ~/container.training/k8s/traefik.yaml
  ```

]
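
To check that the toleration did its job, we can verify that a Traefik pod landed on every node, including `node1` (a sketch; in Traefik's example manifest, the Daemon Set is named `traefik-ingress-controller`):

```bash
kubectl -n kube-system get pods -o wide | grep traefik
# Expect one pod per node, including the one hosting the control plane
```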

---

## Checking that Traefik runs correctly

- If Traefik started correctly, we now have a web server listening on each node

.exercise[

- Check that Traefik is serving 80/tcp:
  ```bash
  curl localhost
  ```

]

We should get a `404 page not found` error.

This is normal: we haven't provided any ingress rule yet.

---

## Setting up DNS

- To make our lives easier, we will use [nip.io](http://nip.io)

- Check out `http://cheddar.A.B.C.D.nip.io`

  (replacing A.B.C.D with the IP address of `node1`)

- We should get the same `404 page not found` error

  (meaning that our DNS is "set up properly", so to speak!)
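
To double-check the DNS resolution itself (a sketch; requires the `dig` utility, with A.B.C.D being the address of `node1` as above):

```bash
dig +short cheddar.A.B.C.D.nip.io
# Should print A.B.C.D
```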

---

## Traefik web UI

- Traefik provides a web dashboard

- With the current install method, it's listening on port 8080

.exercise[

- Go to `http://node1:8080` (replacing `node1` with its IP address)

]

---

## Setting up host-based routing ingress rules

- We are going to use `errm/cheese` images

  (there are [3 tags available](https://hub.docker.com/r/errm/cheese/tags/): wensleydale, cheddar, stilton)

- These images contain a simple static HTTP server sending a picture of cheese

- We will run 3 deployments (one for each cheese)

- We will create 3 services (one for each deployment)

- Then we will create 3 ingress rules (one for each service)

- We will route `<name-of-cheese>.A.B.C.D.nip.io` to the corresponding deployment

---

## Running cheesy web servers

.exercise[

- Run all three deployments:
  ```bash
  kubectl run cheddar --image=errm/cheese:cheddar
  kubectl run stilton --image=errm/cheese:stilton
  kubectl run wensleydale --image=errm/cheese:wensleydale
  ```

- Create a service for each of them:
  ```bash
  kubectl expose deployment cheddar --port=80
  kubectl expose deployment stilton --port=80
  kubectl expose deployment wensleydale --port=80
  ```

]

---

## What does an ingress resource look like?

Here is a minimal host-based ingress resource:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cheddar
spec:
  rules:
  - host: cheddar.`A.B.C.D`.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: cheddar
          servicePort: 80
```

(It is in `k8s/ingress.yaml`.)

---

## Creating our first ingress resources

.exercise[

- Edit the file `~/container.training/k8s/ingress.yaml`

- Replace A.B.C.D with the IP address of `node1`

- Apply the file

- Open http://cheddar.A.B.C.D.nip.io

]

(An image of a piece of cheese should show up.)

---

## Creating the other ingress resources

.exercise[

- Edit the file `~/container.training/k8s/ingress.yaml`

- Replace `cheddar` with `stilton` (in `name`, `host`, `serviceName`)

- Apply the file

- Check that `stilton.A.B.C.D.nip.io` works correctly

- Repeat for `wensleydale` (or use the shell loop sketched after this exercise)

]
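
Here is the shell loop mentioned above, a sketch that assumes `cheddar` only appears in the places we want replaced:

```bash
for CHEESE in stilton wensleydale; do
  sed "s/cheddar/$CHEESE/g" ~/container.training/k8s/ingress.yaml |
    kubectl apply -f -
done
```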

---

## Using multiple ingress controllers

- You can have multiple ingress controllers active simultaneously

  (e.g. Traefik and NGINX)

- You can even have multiple instances of the same controller

  (e.g. one for internal, another for external traffic)

- The `kubernetes.io/ingress.class` annotation can be used to tell which one to use

- It's OK if multiple ingress controllers configure the same resource

  (it just means that the service will be accessible through multiple paths)
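
For instance, to pin an ingress to Traefik specifically, we could annotate it (a sketch, reusing the `cheddar` ingress created earlier):

```bash
kubectl annotate ingress cheddar kubernetes.io/ingress.class=traefik
```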

---

## Ingress: the good

- The traffic flows directly from the ingress load balancer to the backends

  - it doesn't need to go through the `ClusterIP`

  - in fact, we don't even need a `ClusterIP` (we can use a headless service)

- The load balancer can be outside of Kubernetes

  (as long as it has access to the cluster subnet)

- This allows us to use external (hardware, physical machines...) load balancers

- Annotations can encode special features

  (rate-limiting, A/B testing, session stickiness, etc.)

---

## Ingress: the bad

- Aforementioned "special features" are not standardized yet

- Some controllers will support them; some won't

- Even relatively common features (stripping a path prefix) can differ:

  - [traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip](https://docs.traefik.io/user-guide/kubernetes/#path-based-routing)

  - [ingress.kubernetes.io/rewrite-target: /](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/rewrite)

- This should eventually stabilize

  (remember that ingresses are currently `apiVersion: extensions/v1beta1`)
@@ -59,9 +59,9 @@ The `LoadBalancer` type is currently only available on AWS, Azure, and GCE.

.exercise[

- Start a bunch of ElasticSearch containers:
- Start a bunch of HTTP servers:
  ```bash
  kubectl run elastic --image=elasticsearch:2 --replicas=7
  kubectl run httpenv --image=jpetazzo/httpenv --replicas=10
  ```

- Watch them being started:
@@ -69,13 +69,18 @@ The `LoadBalancer` type is currently only available on AWS, Azure, and GCE.
  kubectl get pods -w
  ```

<!-- ```keys ^C``` -->
<!--
```wait httpenv-```
```keys ^C```
-->

]

The `-w` option "watches" events happening on the specified resources.
The `jpetazzo/httpenv` image runs an HTTP server on port 8888.
<br/>
It serves its environment variables in JSON format.

Note: please DO NOT call the service `search`. It would collide with the TLD.
The `-w` option "watches" events happening on the specified resources.

---

@@ -85,9 +90,9 @@ Note: please DO NOT call the service `search`. It would collide with the TLD.

.exercise[

- Expose the ElasticSearch HTTP API port:
- Expose the HTTP port of our server:
  ```bash
  kubectl expose deploy/elastic --port 9200
  kubectl expose deploy/httpenv --port 8888
  ```

- Look up which IP address was allocated:
@@ -119,31 +124,34 @@ Note: please DO NOT call the service `search`. It would collide with the TLD.

## Testing our service

- We will now send a few HTTP requests to our ElasticSearch pods
- We will now send a few HTTP requests to our pods

.exercise[

- Let's obtain the IP address that was allocated for our service, *programmatically:*
  ```bash
  IP=$(kubectl get svc elastic -o go-template --template '{{ .spec.clusterIP }}')
  IP=$(kubectl get svc httpenv -o go-template --template '{{ .spec.clusterIP }}')
  ```

<!--
```hide kubectl wait deploy httpenv --for condition=available```
-->

- Send a few requests:
  ```bash
  curl http://$IP:9200/
  curl http://$IP:8888/
  ```

- Too much output? Filter it with `jq`:
  ```bash
  curl -s http://$IP:8888/ | jq .HOSTNAME
  ```

]

--

We may see `curl: (7) Failed to connect to _IP_ port 9200: Connection refused`.

This is normal while the service starts up.

--

Once it's running, our requests are load balanced across multiple pods.
Our requests are load balanced across multiple pods.
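
A quick way to see the load balancing in action is to extract the pod name from a handful of responses (a sketch, reusing the `$IP` variable obtained above):

```bash
for i in $(seq 5); do
  curl -s http://$IP:8888/ | jq -r .HOSTNAME   # a different pod name should show up as requests rotate
done
```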

---

@@ -197,9 +205,9 @@ class: extra-details

.exercise[

- Check the endpoints that Kubernetes has associated with our `elastic` service:
- Check the endpoints that Kubernetes has associated with our `httpenv` service:
  ```bash
  kubectl describe service elastic
  kubectl describe service httpenv
  ```

]
@@ -221,15 +229,15 @@ class: extra-details

- If we want to see the full list, we can use one of the following commands:
  ```bash
  kubectl describe endpoints elastic
  kubectl get endpoints elastic -o yaml
  kubectl describe endpoints httpenv
  kubectl get endpoints httpenv -o yaml
  ```

- These commands will show us a list of IP addresses

- These IP addresses should match the addresses of the corresponding pods:
  ```bash
  kubectl get pods -l run=elastic -o wide
  kubectl get pods -l run=httpenv -o wide
  ```

---

184 slides/k8s/kubectlproxy.md Normal file
@@ -0,0 +1,184 @@
# Accessing the API with `kubectl proxy`

- The API requires us to authenticate.red[¹]

- There are many authentication methods available, including:

  - TLS client certificates
    <br/>
    (that's what we've used so far)

  - HTTP basic password authentication
    <br/>
    (from a static file; not recommended)

  - various token mechanisms
    <br/>
    (detailed in the [documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#authentication-strategies))

.red[¹]OK, we lied. If you don't authenticate, you are considered to
be user `system:anonymous`, which doesn't have any access rights by default.

---

## Accessing the API directly

- Let's see what happens if we try to access the API directly with `curl`

.exercise[

- Retrieve the ClusterIP allocated to the `kubernetes` service:
  ```bash
  kubectl get svc kubernetes
  ```

- Replace the IP below and try to connect with `curl`:
  ```bash
  curl -k https://`10.96.0.1`/
  ```

]

The API will tell us that user `system:anonymous` cannot access this path.

---

## Authenticating to the API

If we wanted to talk to the API, we would need to:

- extract our TLS key and certificate information from `~/.kube/config`

  (the information is in PEM format, encoded in base64)

- use that information to present our certificate when connecting

  (for instance, with `openssl s_client -key ... -cert ... -connect ...`)

- figure out exactly which credentials to use

  (once we start juggling multiple clusters)

- change that whole process if we're using another authentication method

🤔 There has to be a better way!

---

## Using `kubectl proxy` for authentication

- `kubectl proxy` runs a proxy in the foreground

- This proxy lets us access the Kubernetes API without authentication

  (`kubectl proxy` adds our credentials on the fly to the requests)

- This proxy lets us access the Kubernetes API over plain HTTP

- This is a great tool to learn and experiment with the Kubernetes API

- ... And for serious uses as well (suitable for one-shot scripts)

- For unattended use, it is better to create a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)

---

## Trying `kubectl proxy`

- Let's start `kubectl proxy` and then do a simple request with `curl`!

.exercise[

- Start `kubectl proxy` in the background:
  ```bash
  kubectl proxy &
  ```

- Access the API's default route:
  ```bash
  curl localhost:8001
  ```

<!--
```wait /version```
```keys ^J```
-->

- Terminate the proxy:
  ```bash
  kill %1
  ```

]

The output is a list of available API routes.

---

## `kubectl proxy` is intended for local use

- By default, the proxy listens on port 8001

  (But this can be changed, or we can tell `kubectl proxy` to pick a port)

- By default, the proxy binds to `127.0.0.1`

  (Making it unreachable from other machines, for security reasons)

- By default, the proxy only accepts connections from:

  `^localhost$,^127\.0\.0\.1$,^\[::1\]$`

- This is great when running `kubectl proxy` locally

- Not-so-great when you want to connect to the proxy from a remote machine

---

## Running `kubectl proxy` on a remote machine

- If we wanted to connect to the proxy from another machine, we would need to:

  - bind to `INADDR_ANY` instead of `127.0.0.1`

  - accept connections from any address

- This is achieved with:
  ```
  kubectl proxy --port=8888 --address=0.0.0.0 --accept-hosts=.*
  ```

.warning[Do not do this on a real cluster: it opens full unauthenticated access!]

---

## Security considerations

- Running `kubectl proxy` openly is a huge security risk

- It is slightly better to run the proxy where you need it

  (and copy credentials, e.g. `~/.kube/config`, to that place)

- It is even better to use a limited account with reduced permissions

---

## Good to know ...

- `kubectl proxy` also gives access to all internal services

- Specifically, services are exposed as such:
  ```
  /api/v1/namespaces/<namespace>/services/<service>/proxy
  ```

- We can use `kubectl proxy` to access an internal service in a pinch

  (or, for non HTTP services, `kubectl port-forward`)

- This is not very useful when running `kubectl` directly on the cluster

  (since we could connect to the services directly anyway)

- But it is very powerful as soon as you run `kubectl` from a remote machine
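
Here is a quick sketch, assuming the `httpenv` service from the services chapter still exists in the `default` namespace:

```bash
kubectl proxy &
# Reach the service through the API server, with the proxy handling auth:
curl localhost:8001/api/v1/namespaces/default/services/httpenv/proxy/
kill %1
```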

@@ -26,6 +26,8 @@
  kubectl run pingpong --image alpine ping 1.1.1.1
  ```

<!-- ```hide kubectl wait deploy/pingpong --for condition=available``` -->

]

--
@@ -196,10 +198,13 @@ We could! But the *deployment* would notice it right away, and scale back to the
<!--
```wait Running```
```keys ^C```
```hide kubectl wait deploy pingpong --for condition=available```
```keys kubectl delete pod ping```
```copypaste pong-..........-.....```
-->

- Destroy a pod:
  ```bash
  ```
  kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
  ```
]
@@ -10,7 +10,12 @@
  kubectl get deployments -w
  ```

<!-- ```keys ^C``` -->
<!--
```wait RESTARTS```
```keys ^C```
```wait AVAILABLE```
```keys ^C```
-->

- Now, create more `worker` replicas:
  ```bash

156 slides/k8s/localkubeconfig.md Normal file
@@ -0,0 +1,156 @@
# Controlling the cluster remotely

- All the operations that we do with `kubectl` can be done remotely

- In this section, we are going to use `kubectl` from our local machine

---

## Installing `kubectl`

- If you already have `kubectl` on your local machine, you can skip this

.exercise[

- Download the `kubectl` binary from one of these links:

  [Linux](https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubectl)
  |
  [macOS](https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/darwin/amd64/kubectl)
  |
  [Windows](https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/windows/amd64/kubectl.exe)

- On Linux and macOS, make the binary executable with `chmod +x kubectl`

  (And remember to run it with `./kubectl` or move it to your `$PATH`)

]

Note: if you are following along with a different platform (e.g. Linux on an architecture different from amd64, or with a phone or tablet), installing `kubectl` might be more complicated (or even impossible) so feel free to skip this section.

---

## Testing `kubectl`

- Check that `kubectl` works correctly

  (before even trying to connect to a remote cluster!)

.exercise[

- Ask `kubectl` to show its version number:
  ```bash
  kubectl version --client
  ```

]

The output should look like this:
```
Client Version: version.Info{Major:"1", Minor:"11", GitVersion:"v1.11.2",
GitCommit:"bb9ffb1654d4a729bb4cec18ff088eacc153c239", GitTreeState:"clean",
BuildDate:"2018-08-07T23:17:28Z", GoVersion:"go1.10.3", Compiler:"gc",
Platform:"linux/amd64"}
```

---

## Moving away the existing `~/.kube/config`

- If you already have a `~/.kube/config` file, move it away

  (we are going to overwrite it in the following slides!)

- If you never used `kubectl` on your machine before: nothing to do!

- If you already used `kubectl` to control a Kubernetes cluster before:

  - rename `~/.kube/config` to e.g. `~/.kube/config.bak`

---

## Copying the configuration file from `node1`

- The `~/.kube/config` file that is on `node1` contains all the credentials we need

- Let's copy it over!

.exercise[

- Copy the file from `node1`; if you are using macOS or Linux, you can do:
  ```
  scp `USER`@`X.X.X.X`:.kube/config ~/.kube/config
  # Make sure to replace X.X.X.X with the IP address of node1,
  # and USER with the user name used to log into node1!
  ```

- If you are using Windows, adapt these instructions to your SSH client

]

---

## Updating the server address

- There is a good chance that we need to update the server address

- To know if it is necessary, run `kubectl config view`

- Look for the `server:` address:

  - if it matches the public IP address of `node1`, you're good!

  - if it is anything else (especially a private IP address), update it!

- To update the server address, run:
  ```bash
  kubectl config set-cluster kubernetes --server=https://`X.X.X.X`:6443
  kubectl config set-cluster kubernetes --insecure-skip-tls-verify
  # Make sure to replace X.X.X.X with the IP address of node1!
  ```

---

class: extra-details

## Why do we skip TLS verification?

- Generally, the Kubernetes API uses a certificate that is valid for:

  - `kubernetes`
  - `kubernetes.default`
  - `kubernetes.default.svc`
  - `kubernetes.default.svc.cluster.local`
  - the ClusterIP address of the `kubernetes` service
  - the hostname of the node hosting the control plane (e.g. `node1`)
  - the IP address of the node hosting the control plane

- On most clouds, the IP address of the node is an internal IP address

- ... And we are going to connect over the external IP address

- ... And that external IP address was not used when creating the certificate!

.warning[It's better to NOT skip TLS verification; this is for educational purposes only!]
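
To see exactly which names and addresses the certificate is valid for, we can ask `openssl` (a sketch; replace X.X.X.X as before):

```bash
openssl s_client -connect X.X.X.X:6443 </dev/null 2>/dev/null |
  openssl x509 -noout -text |
  grep -A1 "Subject Alternative Name"
```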

---

## Checking that we can connect to the cluster

- We can now run a couple of trivial commands to check that all is well

.exercise[

- Check the versions of the local client and remote server:
  ```bash
  kubectl version
  ```

- View the nodes of the cluster:
  ```bash
  kubectl get nodes
  ```

]

We can now use the cluster exactly as we did before, even though it's remote.
@@ -40,12 +40,12 @@
|
||||
|
||||
- Load the YAML file into our cluster:
|
||||
```bash
|
||||
kubectl apply -f https://goo.gl/MUZhE4
|
||||
kubectl apply -f ~/container.training/k8s/efk.yaml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
If we [look at the YAML file](https://goo.gl/MUZhE4), we see that
|
||||
If we [look at the YAML file](https://github.com/jpetazzo/container.training/blob/master/k8s/efk.yaml), we see that
|
||||
it creates a daemon set, two deployments, two services,
|
||||
and a few roles and role bindings (to give fluentd the required permissions).
|
||||
|
||||
@@ -113,7 +113,7 @@ and a few roles and role bindings (to give fluentd the required permissions).
|
||||
|
||||
- The first time you connect to Kibana, you must "configure an index pattern"
|
||||
|
||||
- Just use the one that is suggested, `@timestamp`
|
||||
- Just use the one that is suggested, `@timestamp`.red[*]
|
||||
|
||||
- Then click "Discover" (in the top-left corner)
|
||||
|
||||
@@ -123,6 +123,9 @@ and a few roles and role bindings (to give fluentd the required permissions).
|
||||
|
||||
`kubernetes.host`, `kubernetes.pod_name`, `stream`, `log`
|
||||
|
||||
.red[*]If you don't see `@timestamp`, it's probably because no logs exist yet.
|
||||
<br/>Wait a bit, and double-check the logging pipeline!
|
||||
|
||||
---
|
||||
|
||||
## Caveat emptor
|
||||
Some files were not shown because too many files have changed in this diff.