Merge branch 'master' into 2020-01-zr

This commit is contained in:
Jerome Petazzoni
2020-01-20 02:52:31 -06:00
50 changed files with 3968 additions and 414 deletions

21
k8s/canary.yaml Normal file
View File

@@ -0,0 +1,21 @@
# Canary deployment example: a Traefik (1.x) Ingress that splits traffic
# between two Services via the `service-weights` annotation.
apiVersion: networking.k8s.io/v1beta1  # NOTE(review): v1beta1 Ingress is removed in k8s >= 1.22 — confirm target cluster version
kind: Ingress
metadata:
  name: whatever
  annotations:
    # Traefik-specific weighted round-robin: 90% of requests go to the
    # "whatever" Service, 10% to "whatever-new".
    traefik.ingress.kubernetes.io/service-weights: |
      whatever: 90%
      whatever-new: 10%
spec:
  rules:
  # Placeholder host: A.B.C.D is meant to be replaced with a node IP
  # (nip.io wildcard DNS resolves whatever.A.B.C.D.nip.io to A.B.C.D).
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      # Both backends are registered on the same path on purpose; the
      # annotation above decides how traffic is split between them.
      - path: /
        backend:
          serviceName: whatever
          servicePort: 80
      - path: /
        backend:
          serviceName: whatever-new
          servicePort: 80

View File

@@ -2,8 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
@@ -29,8 +27,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
labels:
app: consul
---
apiVersion: v1
kind: Service
@@ -72,7 +68,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.5"
image: "consul:1.6"
args:
- "agent"
- "-bootstrap-expect=3"

69
k8s/eck-cerebro.yaml Normal file
View File

@@ -0,0 +1,69 @@
---
# Cerebro: a web admin UI for Elasticsearch.
# Deployment + NodePort Service + ConfigMap holding its application.conf.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  selector:
    matchLabels:
      app: cerebro
  template:
    metadata:
      labels:
        app: cerebro
    spec:
      volumes:
      # Mounts the "cerebro" ConfigMap (defined below) so the container
      # can read /conf/application.conf.
      - name: conf
        configMap:
          name: cerebro
      containers:
      - image: lmenezes/cerebro
        name: cerebro
        volumeMounts:
        - name: conf
          mountPath: /conf
        args:
        - -Dconfig.file=/conf/application.conf
        env:
        # Password for the "elastic" user, read from the secret created
        # by the ECK operator for the "demo" Elasticsearch cluster.
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  ports:
  - port: 9000
    protocol: TCP
    targetPort: 9000
  selector:
    app: cerebro
  type: NodePort  # exposed on a node port so the UI is reachable from outside
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cerebro
data:
  # NOTE(review): the Play "secret" below is committed in clear text —
  # acceptable for a throwaway demo, but it should live in a Secret (and
  # be rotated) for anything real.
  application.conf: |
    secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
    hosts = [
      {
        host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
        name = "demo"
        auth = {
          username = "elastic"
          password = ${?ELASTICSEARCH_PASSWORD}
        }
      }
    ]

View File

@@ -0,0 +1,19 @@
# ECK custom resource: a single-node Elasticsearch cluster named "demo"
# in the eck-demo namespace, serving plain HTTP (TLS disabled).
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: demo
  namespace: eck-demo
spec:
  http:
    tls:
      selfSignedCertificate:
        disabled: true  # serve plain HTTP instead of a self-signed cert
  nodeSets:
  - name: default
    count: 1  # single node — demo setup, no HA
    config:
      node.data: true
      node.ingest: true
      node.master: true
      # mmap normally requires raising vm.max_map_count on the host;
      # disabling it lets the node start without that sysctl change.
      node.store.allow_mmap: false
  version: 7.5.1

168
k8s/eck-filebeat.yaml Normal file
View File

@@ -0,0 +1,168 @@
---
# Filebeat configuration, mounted into the DaemonSet as /etc/filebeat.yml.
# The block scalar below is the literal config file content; comments
# inside it are part of the ConfigMap data.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: eck-demo
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"
    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      node: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log
    processors:
      - add_cloud_metadata:
      - add_host_metadata:
    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}
    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
# Filebeat DaemonSet: one pod per node, tailing /var/log/containers and
# shipping to the "demo" ECK Elasticsearch cluster via the env vars below.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.5.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: demo-es-http
        - name: ELASTICSEARCH_PORT
          value: "9200"  # quoted: env var values must be strings, not ints
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic
        # Elastic Cloud is not used here. Write the empty string
        # explicitly: a bare `value:` parses as YAML null, which is
        # ambiguous (yamllint empty-values) and relies on the API
        # server defaulting it back to "".
        - name: ELASTIC_CLOUD_ID
          value: ""
        - name: ELASTIC_CLOUD_AUTH
          value: ""
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0  # root is needed to read host container logs
        # If using Red Hat OpenShift uncomment this:
        #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
# RBAC: filebeat reads pod/namespace metadata cluster-wide (needed by
# the add_kubernetes_metadata processor in its config).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: eck-demo
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
# ServiceAccount referenced by the DaemonSet's serviceAccountName.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
---

17
k8s/eck-kibana.yaml Normal file
View File

@@ -0,0 +1,17 @@
# ECK custom resource: a single Kibana instance wired to the "demo"
# Elasticsearch cluster, exposed via NodePort over plain HTTP.
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: demo
spec:
  version: 7.5.1  # keep in sync with the Elasticsearch cluster version
  count: 1
  # Links this Kibana to the Elasticsearch CR named "demo" in eck-demo;
  # the operator wires up credentials and the connection automatically.
  elasticsearchRef:
    name: demo
    namespace: eck-demo
  http:
    service:
      spec:
        type: NodePort  # reachable on a node port from outside the cluster
    tls:
      selfSignedCertificate:
        disabled: true  # plain HTTP, matching the Elasticsearch side

1802
k8s/eck-operator.yaml Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
@@ -36,6 +37,7 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
namespace: default
labels:
app: fluentd
spec:
@@ -95,6 +97,7 @@ metadata:
labels:
app: elasticsearch
name: elasticsearch
namespace: default
spec:
selector:
matchLabels:
@@ -122,6 +125,7 @@ metadata:
labels:
app: elasticsearch
name: elasticsearch
namespace: default
spec:
ports:
- port: 9200
@@ -137,6 +141,7 @@ metadata:
labels:
app: kibana
name: kibana
namespace: default
spec:
selector:
matchLabels:
@@ -160,6 +165,7 @@ metadata:
labels:
app: kibana
name: kibana
namespace: default
spec:
ports:
- port: 5601

View File

@@ -12,19 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
kind: Namespace
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
name: kubernetes-dashboard
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
@@ -32,62 +25,147 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
@@ -95,7 +173,7 @@ metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
@@ -108,60 +186,124 @@ spec:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --enable-skip-login
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0-rc2
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
- --enable-skip-login
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
- port: 8000
targetPort: 8000
selector:
k8s-app: kubernetes-dashboard
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.2
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -181,10 +323,12 @@ spec:
- args:
- sh
- -c
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
image: alpine
name: dashboard
---
apiVersion: v1
kind: Service
metadata:
@@ -199,13 +343,13 @@ spec:
selector:
app: dashboard
type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
name: insecure-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -213,4 +357,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
namespace: kubernetes-dashboard

View File

@@ -1,51 +1,54 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: consul
name: persistentconsul
rules:
- apiGroups: [ "" ]
resources: [ pods ]
verbs: [ get, list ]
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: consul
name: persistentconsul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: consul
kind: ClusterRole
name: persistentconsul
subjects:
- kind: ServiceAccount
name: consul
namespace: orange
name: persistentconsul
namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
name: persistentconsul
---
apiVersion: v1
kind: Service
metadata:
name: consul
name: persistentconsul
spec:
ports:
- port: 8500
name: http
selector:
app: consul
app: persistentconsul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
name: persistentconsul
spec:
serviceName: consul
serviceName: persistentconsul
replicas: 3
selector:
matchLabels:
app: consul
app: persistentconsul
volumeClaimTemplates:
- metadata:
name: data
@@ -58,9 +61,9 @@ spec:
template:
metadata:
labels:
app: consul
app: persistentconsul
spec:
serviceAccountName: consul
serviceAccountName: persistentconsul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -69,19 +72,19 @@ spec:
- key: app
operator: In
values:
- consul
- persistentconsul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
containers:
- name: consul
image: "consul:1.5"
image: "consul:1.6"
volumeMounts:
- name: data
mountPath: /consul/data
args:
- "agent"
- "-bootstrap-expect=3"
- "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
- "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
- "-client=0.0.0.0"
- "-data-dir=/consul/data"
- "-server"

View File

@@ -1,4 +1,4 @@
# SOURCE: https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false
# SOURCE: https://install.portworx.com/?mc=false&kbver=1.17.1&b=true&s=%2Fdev%2Floop4&j=auto&c=px-workshop&stork=true&csi=true&lh=true&st=k8s
---
kind: Service
apiVersion: v1
@@ -10,7 +10,7 @@ metadata:
spec:
selector:
name: portworx
type: NodePort
type: ClusterIP
ports:
- name: px-api
protocol: TCP
@@ -50,6 +50,165 @@ spec:
shortNames:
- vps
- vp
preserveUnknownFields: false
validation:
openAPIV3Schema:
type: object
required:
- spec
properties:
spec:
type: object
description: The desired spec of the volume placement strategy
properties:
replicaAffinity:
type: array
description: Allows you to specify a rule which creates an affinity for replicas within a volume
items:
type: object
properties:
enforcement:
type: string
enum:
- required
- preferred
description: Specifies if the given rule is required (hard) or preferred (soft)
topologyKey:
type: string
minLength: 1
description: Key for the node label that the system uses to denote a topology domain. The key can be for any node label that is present on the Kubernetes node.
matchExpressions:
description: Expression to use for the replica affinity rule
type: array
items:
type: object
properties:
key:
type: string
minLength: 1
operator:
type: string
enum:
- In
- NotIn
- Exists
- DoesNotExist
- Lt
- Gt
description: The logical operator to use for comparing the key and values in the match expression
values:
type: array
items:
type: string
required:
- key
- operator
replicaAntiAffinity:
type: array
description: Allows you to specify a rule that creates an anti-affinity for replicas within a volume
items:
type: object
properties:
enforcement:
type: string
enum:
- required
- preferred
description: Specifies if the given rule is required (hard) or preferred (soft)
topologyKey:
type: string
minLength: 1
description: Key for the node label that the system uses to denote a topology domain. The key can be for any node label that is present on the Kubernetes node.
required:
- topologyKey
volumeAffinity:
type: array
description: Allows you to colocate volumes by specifying rules that place replicas of a volume together with those of another volume for which the specified labels match
items:
type: object
properties:
enforcement:
type: string
enum:
- required
- preferred
description: Specifies if the given rule is required (hard) or preferred (soft)
topologyKey:
type: string
minLength: 1
description: Key for the node label that the system uses to denote a topology domain. The key can be for any node label that is present on the Kubernetes node.
matchExpressions:
description: Expression to use for the volume affinity rule
type: array
items:
type: object
properties:
key:
type: string
minLength: 1
operator:
type: string
enum:
- In
- NotIn
- Exists
- DoesNotExist
- Lt
- Gt
description: The logical operator to use for comparing the key and values in the match expression
values:
type: array
items:
type: string
required:
- key
- operator
required:
- matchExpressions
volumeAntiAffinity:
type: array
description: Allows you to specify dissociation rules between 2 or more volumes that match the given labels
items:
type: object
properties:
enforcement:
type: string
enum:
- required
- preferred
description: Specifies if the given rule is required (hard) or preferred (soft)
topologyKey:
type: string
minLength: 1
description: Key for the node label that the system uses to denote a topology domain. The key can be for any node label that is present on the Kubernetes node.
matchExpressions:
description: Expression to use for the volume anti affinity rule
type: array
items:
type: object
properties:
key:
type: string
minLength: 1
operator:
type: string
enum:
- In
- NotIn
- Exists
- DoesNotExist
- Lt
- Gt
description: The logical operator to use for comparing the key and values in the match expression
values:
type: array
items:
type: string
required:
- key
- operator
required:
- matchExpressions
---
apiVersion: v1
kind: ServiceAccount
@@ -84,6 +243,13 @@ rules:
- apiGroups: ["portworx.io"]
resources: ["volumeplacementstrategies"]
verbs: ["get", "list"]
- apiGroups: ["stork.libopenstorage.org"]
resources: ["backuplocations"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -127,14 +293,19 @@ roleRef:
name: px-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: portworx
namespace: kube-system
labels:
name: portworx
annotations:
portworx.com/install-source: "https://install.portworx.com/?kbver=1.15.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true&st=k8s&mc=false"
portworx.com/install-source: "https://install.portworx.com/?mc=false&kbver=1.17.1&b=true&s=%2Fdev%2Floop4&j=auto&c=px-workshop&stork=true&csi=true&lh=true&st=k8s"
spec:
selector:
matchLabels:
name: portworx
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
@@ -158,28 +329,20 @@ spec:
operator: DoesNotExist
hostNetwork: true
hostPID: false
initContainers:
- name: checkloop
image: alpine
command: [ "sh", "-c" ]
args:
- |
if ! grep -q loop4 /proc/partitions; then
echo 'Could not find "loop4" in /proc/partitions. Please create it first.'
exit 1
fi
containers:
- name: portworx
image: portworx/oci-monitor:2.1.3
image: portworx/oci-monitor:2.3.2
imagePullPolicy: Always
args:
["-c", "px-workshop", "-s", "/dev/loop4", "-secret_type", "k8s", "-b",
["-c", "px-workshop", "-s", "/dev/loop4", "-secret_type", "k8s", "-j", "auto", "-b",
"-x", "kubernetes"]
env:
- name: "AUTO_NODE_RECOVERY_TIMEOUT_IN_SECS"
value: "1500"
- name: "PX_TEMPLATE_VERSION"
value: "v4"
- name: CSI_ENDPOINT
value: unix:///var/lib/kubelet/plugins/pxd.portworx.com/csi.sock
livenessProbe:
periodSeconds: 30
@@ -210,6 +373,10 @@ spec:
mountPath: /etc/crictl.yaml
- name: etcpwx
mountPath: /etc/pwx
- name: dev
mountPath: /dev
- name: csi-driver-path
mountPath: /var/lib/kubelet/plugins/pxd.portworx.com
- name: optpwx
mountPath: /opt/pwx
- name: procmount
@@ -224,6 +391,27 @@ spec:
readOnly: true
- name: dbusmount
mountPath: /var/run/dbus
- name: csi-node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/pxd.portworx.com/csi.sock"
imagePullPolicy: Always
env:
- name: ADDRESS
value: /csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- name: csi-driver-path
mountPath: /csi
- name: registration-dir
mountPath: /registration
restartPolicy: Always
serviceAccountName: px-account
volumes:
@@ -246,6 +434,17 @@ spec:
- name: etcpwx
hostPath:
path: /etc/pwx
- name: dev
hostPath:
path: /dev
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: DirectoryOrCreate
- name: csi-driver-path
hostPath:
path: /var/lib/kubelet/plugins/pxd.portworx.com
type: DirectoryOrCreate
- name: optpwx
hostPath:
path: /opt/pwx
@@ -265,6 +464,172 @@ spec:
hostPath:
path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: px-csi-account
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-csi-role
rules:
- apiGroups: ["extensions"]
resources: ["podsecuritypolicies"]
resourceNames: ["privileged"]
verbs: ["use"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["*"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots", "volumesnapshotcontents", "volumesnapshotclasses", "volumesnapshots/status"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csidrivers"]
verbs: ["create", "delete"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["*"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: px-csi-role-binding
subjects:
- kind: ServiceAccount
name: px-csi-account
namespace: kube-system
roleRef:
kind: ClusterRole
name: px-csi-role
apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
name: px-csi-service
namespace: kube-system
spec:
clusterIP: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: px-csi-ext
namespace: kube-system
spec:
replicas: 3
selector:
matchLabels:
app: px-csi-driver
template:
metadata:
labels:
app: px-csi-driver
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: px/enabled
operator: NotIn
values:
- "false"
- key: node-role.kubernetes.io/master
operator: DoesNotExist
serviceAccount: px-csi-account
containers:
- name: csi-external-provisioner
imagePullPolicy: Always
image: quay.io/openstorage/csi-provisioner:v1.4.0-1
args:
- "--v=5"
- "--provisioner=pxd.portworx.com"
- "--csi-address=$(ADDRESS)"
- "--enable-leader-election"
- "--leader-election-type=leases"
env:
- name: ADDRESS
value: /csi/csi.sock
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: quay.io/k8scsi/csi-snapshotter:v2.0.0
imagePullPolicy: Always
args:
- "--v=3"
- "--csi-address=$(ADDRESS)"
- "--leader-election=true"
env:
- name: ADDRESS
value: /csi/csi.sock
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
imagePullPolicy: Always
image: quay.io/k8scsi/csi-resizer:v0.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--leader-election=true"
env:
- name: ADDRESS
value: /csi/csi.sock
securityContext:
privileged: true
volumeMounts:
- name: socket-dir
mountPath: /csi
volumes:
- name: socket-dir
hostPath:
path: /var/lib/kubelet/plugins/pxd.portworx.com
type: DirectoryOrCreate
---
kind: Service
apiVersion: v1
metadata:
@@ -275,7 +640,7 @@ metadata:
spec:
selector:
name: portworx-api
type: NodePort
type: ClusterIP
ports:
- name: px-api
protocol: TCP
@@ -290,12 +655,17 @@ spec:
port: 9021
targetPort: 9021
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: portworx-api
namespace: kube-system
labels:
name: portworx-api
spec:
selector:
matchLabels:
name: portworx-api
minReadySeconds: 0
updateStrategy:
type: RollingUpdate
@@ -331,8 +701,14 @@ spec:
port: 9001
restartPolicy: Always
serviceAccountName: px-account
---
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: pxd.portworx.com
spec:
attachRequired: false
podInfoOnMount: false
---
apiVersion: v1
kind: ConfigMap
@@ -368,48 +744,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: stork-role
rules:
- apiGroups: [""]
resources: ["pods", "pods/exec"]
verbs: ["get", "list", "delete", "create", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["stork.libopenstorage.org"]
resources: ["*"]
verbs: ["get", "list", "watch", "update", "patch", "create", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "get"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots", "volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
resources: ["deployments", "deployments/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
resources: ["statefulsets", "statefulsets/extensions"]
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
resources: ["*"]
verbs: ["list", "get"]
verbs: ["*"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -437,7 +774,7 @@ spec:
port: 8099
targetPort: 8099
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
@@ -447,6 +784,9 @@ metadata:
name: stork
namespace: kube-system
spec:
selector:
matchLabels:
name: stork
strategy:
rollingUpdate:
maxSurge: 1
@@ -469,7 +809,7 @@ spec:
- --leader-elect=true
- --health-monitor-interval=120
imagePullPolicy: Always
image: openstorage/stork:2.2.4
image: openstorage/stork:2.3.1
env:
- name: "PX_SERVICE_NAME"
value: "portworx-api"
@@ -512,8 +852,8 @@ rules:
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get"]
- apiGroups: [""]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "events.k8s.io"]
resources: ["events"]
verbs: ["create", "patch", "update"]
- apiGroups: [""]
@@ -548,8 +888,11 @@ rules:
resources: ["persistentvolumeclaims", "persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
resources: ["storageclasses", "csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update", "get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -564,7 +907,7 @@ roleRef:
name: stork-scheduler-role
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
@@ -574,12 +917,16 @@ metadata:
name: stork-scheduler
namespace: kube-system
spec:
selector:
matchLabels:
name: stork-scheduler
replicas: 3
template:
metadata:
labels:
component: scheduler
tier: control-plane
name: stork-scheduler
name: stork-scheduler
spec:
containers:
@@ -591,7 +938,7 @@ spec:
- --policy-configmap=stork-config
- --policy-configmap-namespace=kube-system
- --lock-object-name=stork-scheduler
image: gcr.io/google_containers/kube-scheduler-amd64:v1.15.2
image: gcr.io/google_containers/kube-scheduler-amd64:v1.17.1
livenessProbe:
httpGet:
path: /healthz
@@ -693,7 +1040,7 @@ spec:
selector:
tier: px-web-console
---
apiVersion: apps/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: px-lighthouse
@@ -701,6 +1048,9 @@ metadata:
labels:
tier: px-web-console
spec:
selector:
matchLabels:
tier: px-web-console
strategy:
rollingUpdate:
maxSurge: 1
@@ -717,7 +1067,7 @@ spec:
spec:
initContainers:
- name: config-init
image: portworx/lh-config-sync:0.4
image: portworx/lh-config-sync:2.0.5
imagePullPolicy: Always
args:
- "init"
@@ -726,7 +1076,7 @@ spec:
mountPath: /config/lh
containers:
- name: px-lighthouse
image: portworx/px-lighthouse:2.0.4
image: portworx/px-lighthouse:2.0.6
imagePullPolicy: Always
args: [ "-kubernetes", "true" ]
ports:
@@ -736,7 +1086,7 @@ spec:
- name: config
mountPath: /config/lh
- name: config-sync
image: portworx/lh-config-sync:0.4
image: portworx/lh-config-sync:2.0.5
imagePullPolicy: Always
args:
- "sync"
@@ -744,7 +1094,7 @@ spec:
- name: config
mountPath: /config/lh
- name: stork-connector
image: portworx/lh-stork-connector:0.2
image: portworx/lh-stork-connector:2.0.5
imagePullPolicy: Always
serviceAccountName: px-lh-account
volumes:
@@ -763,3 +1113,4 @@ provisioner: kubernetes.io/portworx-volume
parameters:
repl: "2"
priority_io: "high"

View File

@@ -242,7 +242,7 @@ EOF"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"
@@ -533,14 +533,8 @@ _cmd_helmprom() {
need_tag
pssh "
if i_am_first_node; then
kubectl -n kube-system get serviceaccount helm ||
kubectl -n kube-system create serviceaccount helm
sudo -u docker -H helm init --service-account helm
kubectl get clusterrolebinding helm-can-do-everything ||
kubectl create clusterrolebinding helm-can-do-everything \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:helm
sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
sudo -u docker -H helm install prometheus stable/prometheus \
--namespace kube-system \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \

View File

@@ -26,6 +26,7 @@ IPADDR = None
class State(object):
def __init__(self):
self.clipboard = ""
self.interactive = True
self.verify_status = False
self.simulate_type = True
@@ -38,6 +39,7 @@ class State(object):
def load(self):
data = yaml.load(open("state.yaml"))
self.clipboard = str(data["clipboard"])
self.interactive = bool(data["interactive"])
self.verify_status = bool(data["verify_status"])
self.simulate_type = bool(data["simulate_type"])
@@ -51,6 +53,7 @@ class State(object):
def save(self):
with open("state.yaml", "w") as f:
yaml.dump(dict(
clipboard=self.clipboard,
interactive=self.interactive,
verify_status=self.verify_status,
simulate_type=self.simulate_type,
@@ -85,9 +88,11 @@ class Snippet(object):
# On single-line snippets, the data follows the method immediately
if '\n' in content:
self.method, self.data = content.split('\n', 1)
else:
self.data = self.data.strip()
elif ' ' in content:
self.method, self.data = content.split(' ', 1)
self.data = self.data.strip()
else:
self.method, self.data = content, None
self.next = None
def __str__(self):
@@ -186,7 +191,7 @@ def wait_for_prompt():
if last_line == "$":
# This is a perfect opportunity to grab the node's IP address
global IPADDR
IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
IPADDR = re.findall("\[(.*)\]", output, re.MULTILINE)[-1]
return
# When we are in an alpine container, the prompt will be "/ #"
if last_line == "/ #":
@@ -264,19 +269,31 @@ for slide in re.split("\n---?\n", content):
slides.append(Slide(slide))
# Send a single key.
# Useful for special keys, e.g. tmux interprets these strings:
# ^C (and all other sequences starting with a caret)
# Space
# ... and many others (check tmux manpage for details).
def send_key(data):
subprocess.check_call(["tmux", "send-keys", data])
# Send multiple keys.
# If keystroke simulation is off, all keys are sent at once.
# If keystroke simulation is on, keys are sent one by one, with a delay between them.
def send_keys(data):
if state.simulate_type and data[0] != '^':
if not state.simulate_type:
subprocess.check_call(["tmux", "send-keys", data])
else:
for key in data:
if key == ";":
key = "\\;"
if key == "\n":
if interruptible_sleep(1): return
subprocess.check_call(["tmux", "send-keys", key])
send_key(key)
if interruptible_sleep(0.15*random.random()): return
if key == "\n":
if interruptible_sleep(1): return
else:
subprocess.check_call(["tmux", "send-keys", data])
def capture_pane():
@@ -323,7 +340,10 @@ def check_bounds():
while True:
state.save()
slide = slides[state.slide]
snippet = slide.snippets[state.snippet-1] if state.snippet else None
if state.snippet and state.snippet <= len(slide.snippets):
snippet = slide.snippets[state.snippet-1]
else:
snippet = None
click.clear()
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
@@ -398,7 +418,9 @@ while True:
continue
method, data = snippet.method, snippet.data
logging.info("Running with method {}: {}".format(method, data))
if method == "keys":
if method == "key":
send_key(data)
elif method == "keys":
send_keys(data)
elif method == "bash" or (method == "hide" and state.run_hidden):
# Make sure that we're ready
@@ -421,7 +443,7 @@ while True:
wait_for_prompt()
# Verify return code
check_exit_status()
elif method == "copypaste":
elif method == "copy":
screen = capture_pane()
matches = re.findall(data, screen, flags=re.DOTALL)
if len(matches) == 0:
@@ -430,8 +452,12 @@ while True:
match = matches[-1]
# Remove line breaks (like a screen copy paste would do)
match = match.replace('\n', '')
send_keys(match + '\n')
# FIXME: we should factor out the "bash" method
logging.info("Copied {} to clipboard.".format(match))
state.clipboard = match
elif method == "paste":
logging.info("Pasting {} from clipboard.".format(state.clipboard))
send_keys(state.clipboard)
elif method == "check":
wait_for_prompt()
check_exit_status()
elif method == "open":
@@ -445,6 +471,8 @@ while True:
if state.interactive:
print("Press any key to continue to next step...")
click.getchar()
elif method == "tmux":
subprocess.check_call(["tmux"] + data.split())
else:
logging.warning("Unknown method {}: {!r}".format(method, data))
move_forward()

View File

@@ -1 +0,0 @@
../swarm/links.md

View File

@@ -0,0 +1,12 @@
# Links and resources
- [Docker Community Slack](https://community.docker.com/registrations/groups/4316)
- [Docker Community Forums](https://forums.docker.com/)
- [Docker Hub](https://hub.docker.com)
- [Docker Blog](https://blog.docker.com/)
- [Docker documentation](https://docs.docker.com/)
- [Docker on StackOverflow](https://stackoverflow.com/questions/tagged/docker)
- [Docker on Twitter](https://twitter.com/docker)
- [Play With Docker Hands-On Labs](https://training.play-with-docker.com/)
.footnote[These slides (and future updates) are on → https://container.training/]

View File

@@ -118,9 +118,9 @@ installed and set up `kubectl` to communicate with your cluster.
<!--
```wait Connected to localhost```
```keys INFO server```
```keys ^J```
```key ^J```
```keys QUIT```
```keys ^J```
```key ^J```
-->
- Terminate the port forwarder:

View File

@@ -547,7 +547,7 @@ It's important to note a couple of details in these flags...
- Exit the container with `exit` or `^D`
<!-- ```keys ^D``` -->
<!-- ```key ^D``` -->
]

View File

@@ -109,7 +109,7 @@ spec:
<!--
```longwait latest: digest: sha256:```
```keys ^C```
```key ^C```
-->
]

View File

@@ -174,7 +174,7 @@ spec:
<!--
```longwait registry:5000/rng-kaniko:latest:```
```keys ^C```
```key ^C```
-->
]

View File

@@ -199,6 +199,30 @@ class: extra-details
class: extra-details
## How many nodes should a cluster have?
- There is no particular constraint
(no need to have an odd number of nodes for quorum)
- A cluster can have zero nodes
(but then it won't be able to start any pods)
- For testing and development, having a single node is fine
- For production, make sure that you have extra capacity
(so that your workload still fits if you lose a node or a group of nodes)
- Kubernetes is tested with [up to 5000 nodes](https://kubernetes.io/docs/setup/best-practices/cluster-large/)
(however, running a cluster of that size requires a lot of tuning)
---
class: extra-details
## Do we need to run Docker at all?
No!

View File

@@ -110,20 +110,22 @@
```bash vim rng.yml```
```wait kind: Deployment```
```keys /Deployment```
```keys ^J```
```key ^J```
```keys cwDaemonSet```
```keys ^[``` ]
```key ^[``` ]
```keys :wq```
```keys ^J```
```key ^J```
-->
- Save, quit
- Try to create our new resource:
```
```bash
kubectl apply -f rng.yml
```
<!-- ```wait error:``` -->
]
--
@@ -501,11 +503,11 @@ be any interruption.*
<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```key ^J```
```keys noenabled: yes```
```keys ^[``` ]
```key ^[``` ]
```keys :wq```
```keys ^J```
```key ^J```
-->
]
@@ -538,19 +540,18 @@ be any interruption.*
.exercise[
- Update the service to add `enabled: "yes"` to its selector:
```bash
kubectl edit service rng
```
- Update the YAML manifest of the service
- Add `enabled: "yes"` to its selector
<!--
```wait Please edit the object below```
```keys /app: rng```
```keys ^J```
```keys noenabled: "yes"```
```keys ^[``` ]
```keys /yes```
```key ^J```
```keys cw"yes"```
```key ^[``` ]
```keys :wq```
```keys ^J```
```key ^J```
-->
]
@@ -589,16 +590,25 @@ If we did everything correctly, the web UI shouldn't show any change.
```bash
POD=$(kubectl get pod -l app=rng,pod-template-hash -o name)
kubectl logs --tail 1 --follow $POD
```
(We should see a steady stream of HTTP logs)
<!--
```wait HTTP/1.1```
```tmux split-pane -v```
-->
- In another window, remove the label from the pod:
```bash
kubectl label pod -l app=rng,pod-template-hash enabled-
```
(The stream of HTTP logs should stop immediately)
<!--
```key ^D```
```key ^C```
-->
]
There might be a slight change in the web UI (since we removed a bit

View File

@@ -162,6 +162,8 @@ Instead, it has the fields expected in a DaemonSet.
kubectl diff -f web.yaml
```
<!-- ```wait status:``` -->
]
Note: we don't need to specify `--validate=false` here.

View File

@@ -22,9 +22,9 @@
- `helm` is a CLI tool
- `tiller` is its companion server-side component
- It is used to find, install, upgrade *charts*
- A "chart" is an archive containing templatized YAML bundles
- A chart is an archive containing templatized YAML bundles
- Charts are versioned
@@ -32,6 +32,90 @@
---
## Differences between charts and packages
- A package (deb, rpm...) contains binaries, libraries, etc.
- A chart contains YAML manifests
(the binaries, libraries, etc. are in the images referenced by the chart)
- On most distributions, a package can only be installed once
(installing another version replaces the installed one)
- A chart can be installed multiple times
- Each installation is called a *release*
- This allows us to install e.g. 10 instances of MongoDB
(with potentially different versions and configurations)
---
class: extra-details
## Wait a minute ...
*But, on my Debian system, I have Python 2 **and** Python 3.
<br/>
Also, I have multiple versions of the Postgres database engine!*
Yes!
But they have different package names:
- `python2.7`, `python3.8`
- `postgresql-10`, `postgresql-11`
Good to know: the Postgres package in Debian includes
provisions to deploy multiple Postgres servers on the
same system, but it's an exception (and it's a lot of
work done by the package maintainer, not by the `dpkg`
or `apt` tools).
---
## Helm 2 vs Helm 3
- Helm 3 was released [November 13, 2019](https://helm.sh/blog/helm-3-released/)
- Charts remain compatible between Helm 2 and Helm 3
- The CLI is very similar (with minor changes to some commands)
- The main difference is that Helm 2 uses `tiller`, a server-side component
- Helm 3 doesn't use `tiller` at all, making it simpler (yay!)
---
class: extra-details
## With or without `tiller`
- With Helm 3:
- the `helm` CLI communicates directly with the Kubernetes API
- it creates resources (deployments, services...) with our credentials
- With Helm 2:
- the `helm` CLI communicates with `tiller`, telling `tiller` what to do
- `tiller` then communicates with the Kubernetes API, using its own credentials
- This indirect model caused significant permissions headaches
(`tiller` required very broad permissions to function)
- `tiller` was removed in Helm 3 to simplify the security aspects
---
## Installing Helm
- If the `helm` CLI is not installed in your environment, install it
@@ -45,14 +129,21 @@
- If it's not installed, run the following command:
```bash
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 \
| bash
```
]
(To install Helm 2, replace `get-helm-3` with `get`.)
---
## Installing Tiller
class: extra-details
## Only if using Helm 2 ...
- We need to install Tiller and give it some permissions
- Tiller is composed of a *service* and a *deployment* in the `kube-system` namespace
@@ -67,8 +158,6 @@
]
If Tiller was already installed, don't worry: this won't break it.
At the end of the install process, you will see:
```
@@ -77,9 +166,11 @@ Happy Helming!
---
## Fix account permissions
class: extra-details
- Helm permission model requires us to tweak permissions
## Only if using Helm 2 ...
- Tiller needs permissions to create Kubernetes resources
- In a more realistic deployment, you might create per-user or per-team
service accounts, roles, and role bindings
@@ -92,6 +183,7 @@ Happy Helming!
--clusterrole=cluster-admin --serviceaccount=kube-system:default
```
]
(Defining the exact roles and permissions on your cluster requires
@@ -100,79 +192,228 @@ fine for personal and development clusters.)
---
## View available charts
## Charts and repositories
- A public repo is pre-configured when installing Helm
- A *repository* (or repo in short) is a collection of charts
- We can view available charts with `helm search` (and an optional keyword)
- It's just a bunch of files
(they can be hosted by a static HTTP server, or on a local directory)
- We can add "repos" to Helm, giving them a nickname
- The nickname is used when referring to charts on that repo
(for instance, if we try to install `hello/world`, that
means the chart `world` on the repo `hello`; and that repo
`hello` might be something like https://blahblah.hello.io/charts/)
---
## Managing repositories
- Let's check what repositories we have, and add the `stable` repo
(the `stable` repo contains a set of official-ish charts)
.exercise[
- View all available charts:
- List our repos:
```bash
helm search
helm repo list
```
- View charts related to `prometheus`:
- Add the `stable` repo:
```bash
helm search prometheus
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
```
]
Adding a repo can take a few seconds (it downloads the list of charts from the repo).
It's OK to add a repo that already exists (it will merely update it).
---
## Install a chart
## Search available charts
- Most charts use `LoadBalancer` service types by default
- We can search available charts with `helm search`
- Most charts require persistent volumes to store data
- We need to specify where to search (only our repos, or Helm Hub)
- We need to relax these requirements a bit
- Let's search for all charts mentioning tomcat!
.exercise[
- Install the Prometheus metrics collector on our cluster:
- Search for tomcat in the repo that we added earlier:
```bash
helm install stable/prometheus \
--set server.service.type=NodePort \
--set server.persistentVolume.enabled=false
helm search repo tomcat
```
- Search for tomcat on the Helm Hub:
```bash
helm search hub tomcat
```
]
Where do these `--set` options come from?
[Helm Hub](https://hub.helm.sh/) indexes many repos, using the [Monocular](https://github.com/helm/monocular) server.
---
## Inspecting a chart
## Charts and releases
- `helm inspect` shows details about a chart (including available options)
- "Installing a chart" means creating a *release*
- We need to name that release
(or use the `--generate-name` to get Helm to generate one for us)
.exercise[
- See the metadata and all available options for `stable/prometheus`:
- Install the tomcat chart that we found earlier:
```bash
helm inspect stable/prometheus
helm install java4ever stable/tomcat
```
]
The chart's metadata includes a URL to the project's home page.
(Sometimes it conveniently points to the documentation for the chart.)
---
## Viewing installed charts
- Helm keeps track of what we've installed
.exercise[
- List installed Helm charts:
- List the releases:
```bash
helm list
```
]
---
class: extra-details
## Searching and installing with Helm 2
- Helm 2 doesn't have support for the Helm Hub
- The `helm search` command only takes a search string argument
(e.g. `helm search tomcat`)
- With Helm 2, the name is optional:
`helm install stable/tomcat` will automatically generate a name
`helm install --name java4ever stable/tomcat` will specify a name
---
## Viewing resources of a release
- This specific chart labels all its resources with a `release` label
- We can use a selector to see these resources
.exercise[
- List all the resources created by this release:
```bash
kubectl get all --selector=release=java4ever
```
]
Note: this `release` label wasn't added automatically by Helm.
<br/>
It is defined in that chart. In other words, not all charts will provide this label.
---
## Configuring a release
- By default, `stable/tomcat` creates a service of type `LoadBalancer`
- We would like to change that to a `NodePort`
- We could use `kubectl edit service java4ever-tomcat`, but ...
... our changes would get overwritten next time we update that chart!
- Instead, we are going to *set a value*
- Values are parameters that the chart can use to change its behavior
- Values have default values
- Each chart is free to define its own values and their defaults
---
## Checking possible values
- We can inspect a chart with `helm show` or `helm inspect`
.exercise[
- Look at the README for tomcat:
```bash
helm show readme stable/tomcat
```
- Look at the values and their defaults:
```bash
helm show values stable/tomcat
```
]
The `values` may or may not have useful comments.
The `readme` may or may not have (accurate) explanations for the values.
(If we're unlucky, there won't be any indication about how to use the values!)
---
## Setting values
- Values can be set when installing a chart, or when upgrading it
- We are going to update `java4ever` to change the type of the service
.exercise[
- Update `java4ever`:
```bash
helm upgrade java4ever stable/tomcat --set service.type=NodePort
```
]
Note that we have to specify the chart that we use (`stable/tomcat`),
even if we just want to update some values.
We can set multiple values. If we want to set many values, we can use `-f`/`--values` and pass a YAML file with all the values.
All unspecified values will take the default values defined in the chart.
---
## Connecting to tomcat
- Let's check the tomcat server that we just installed
- Note: its readiness probe has a 60s delay
(so it will take 60s after the initial deployment before the service works)
.exercise[
- Check the node port allocated to the service:
```bash
kubectl get service java4ever-tomcat
PORT=$(kubectl get service java4ever-tomcat -o jsonpath={..nodePort})
```
- Connect to it, checking the demo app on `/sample/`:
```bash
curl localhost:$PORT/sample/
```
]

View File

@@ -105,19 +105,36 @@
- Monitor pod CPU usage:
```bash
watch kubectl top pods
watch kubectl top pods -l app=busyhttp
```
<!--
```wait NAME```
```tmux split-pane -v```
```bash CLUSTERIP=$(kubectl get svc busyhttp -o jsonpath={.spec.clusterIP})```
-->
- Monitor service latency:
```bash
httping http://`ClusterIP`/
httping http://`$CLUSTERIP`/
```
<!--
```wait connected to```
```tmux split-pane -v```
-->
- Monitor cluster events:
```bash
kubectl get events -w
```
<!--
```wait Normal```
```tmux split-pane -v```
```bash CLUSTERIP=$(kubectl get svc busyhttp -o jsonpath={.spec.clusterIP})```
-->
]
---
@@ -130,9 +147,15 @@
- Send a lot of requests to the service, with a concurrency level of 3:
```bash
ab -c 3 -n 100000 http://`ClusterIP`/
ab -c 3 -n 100000 http://`$CLUSTERIP`/
```
<!--
```wait be patient```
```tmux split-pane -v```
```tmux selectl even-vertical```
-->
]
The latency (reported by `httping`) should increase above 3s.
@@ -193,6 +216,20 @@ This can also be set with `--cpu-percent=`.
kubectl edit deployment busyhttp
```
<!--
```wait Please edit```
```keys /resources```
```key ^J```
```keys $xxxo requests:```
```key ^J```
```key Space```
```key Space```
```keys cpu: "1"```
```key Escape```
```keys :wq```
```key ^J```
-->
- In the `containers` list, add the following block:
```yaml
resources:
@@ -243,3 +280,29 @@ This can also be set with `--cpu-percent=`.
- The metrics provided by metrics server are standard; everything else is custom
- For more details, see [this great blog post](https://medium.com/uptime-99/kubernetes-hpa-autoscaling-with-custom-and-external-metrics-da7f41ff7846) or [this talk](https://www.youtube.com/watch?v=gSiGFH4ZnS8)
---
## Cleanup
- Since `busyhttp` uses CPU cycles, let's stop it before moving on
.exercise[
- Delete the `busyhttp` Deployment:
```bash
kubectl delete deployment busyhttp
```
<!--
```key ^D```
```key ^C```
```key ^D```
```key ^C```
```key ^D```
```key ^C```
```key ^D```
```key ^C```
-->
]

View File

@@ -524,3 +524,183 @@ spec:
- This should eventually stabilize
(remember that ingresses are currently `apiVersion: networking.k8s.io/v1beta1`)
---
## A special feature in action
- We're going to see how to implement *canary releases* with Traefik
- This feature is available on multiple ingress controllers
- ... But it is configured very differently on each of them
---
## Canary releases
- A *canary release* (or canary launch or canary deployment) is a release that will process only a small fraction of the workload
- After deploying the canary, we compare its metrics to the normal release
- If the metrics look good, the canary will progressively receive more traffic
(until it gets 100% and becomes the new normal release)
- If the metrics aren't good, the canary is automatically removed
- When we deploy a bad release, only a tiny fraction of traffic is affected
---
## Various ways to implement canary
- Example 1: canary for a microservice
- 1% of all requests (sampled randomly) are sent to the canary
- the remaining 99% are sent to the normal release
- Example 2: canary for a web app
- 1% of users are sent to the canary web site
- the remaining 99% are sent to the normal release
- Example 3: canary for shipping physical goods
- 1% of orders are shipped with the canary process
- the remaining 99% are shipped with the normal process
- We're going to implement example 1 (per-request routing)
---
## Canary releases with Traefik
- We need to deploy the canary and expose it with a separate service
- Then, in the Ingress resource, we need:
- multiple `paths` entries (one for each service, canary and normal)
- an extra annotation indicating the weight of each service
- If we want, we can send requests to more than 2 services
- Let's send requests to our 3 cheesy services!
.exercise[
- Create the resource shown on the next slide
]
---
## The Ingress resource
.small[
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: cheeseplate
annotations:
traefik.ingress.kubernetes.io/service-weights: |
cheddar: 50%
wensleydale: 25%
stilton: 25%
spec:
rules:
- host: cheeseplate.`A.B.C.D`.nip.io
http:
paths:
- path: /
backend:
serviceName: cheddar
servicePort: 80
- path: /
backend:
        serviceName: wensleydale
servicePort: 80
- path: /
backend:
serviceName: stilton
servicePort: 80
```
]
---
## Testing the canary
- Let's check the percentage of requests going to each service
.exercise[
- Continuously send HTTP requests to the new ingress:
```bash
while sleep 0.1; do
curl -s http://cheeseplate.A.B.C.D.nip.io/
done
```
]
We should see a 50/25/25 request mix.
---
class: extra-details
## Load balancing fairness
Note: if we use odd request ratios, the load balancing algorithm might appear to be broken on a small scale (when sending a small number of requests), but on a large scale (with many requests) it will be fair.
For instance, with a 11%/89% ratio, we can see 79 requests going to the 89%-weighted service, and then requests alternating between the two services; then 79 requests again, etc.
---
class: extra-details
## Other ingress controllers
*Just to illustrate how different things are ...*
- With the NGINX ingress controller:
- define two ingress resources
<br/>
(specifying rules with the same host+path)
- add `nginx.ingress.kubernetes.io/canary` annotations on each
- With Linkerd2:
- define two services
- define an extra service for the weighted aggregate of the two
- define a TrafficSplit (this is a CRD introduced by the SMI spec)
---
class: extra-details
## We need more than that
What we saw is just one of the multiple building blocks that we need to achieve a canary release.
We also need:
- metrics (latency, performance ...) for our releases
- automation to alter canary weights
(increase canary weight if metrics look good; decrease otherwise)
- a mechanism to manage the lifecycle of the canary releases
(create them, promote them, delete them ...)
For inspiration, check [flagger by Weave](https://github.com/weaveworks/flagger).

View File

@@ -124,7 +124,10 @@ kubectl create service externalname k8s --external-name kubernetes.io
kubectl get pods -w
```
<!-- ```keys ^C``` -->
<!--
```wait NAME```
```tmux split-pane -h```
-->
- Create a deployment for this very lightweight HTTP server:
```bash
@@ -191,6 +194,8 @@ kubectl create service externalname k8s --external-name kubernetes.io
<!--
```hide kubectl wait deploy httpenv --for condition=available```
```key ^D```
```key ^C```
-->
- Send a few requests:

View File

@@ -101,7 +101,7 @@ If we wanted to talk to the API, we would need to:
<!--
```wait /version```
```keys ^J```
```key ^J```
-->
- Terminate the proxy:

View File

@@ -154,6 +154,11 @@ pod/pingpong-7c8bbcd9bc-6c9qz 1/1 Running 0 10m
- Leave that command running, so that we can keep an eye on these logs
<!--
```wait seq=3```
```tmux split-pane -h```
-->
]
---
@@ -206,11 +211,21 @@ We could! But the *deployment* would notice it right away, and scale back to the
- Interrupt `kubectl logs` (with Ctrl-C)
<!--
```tmux last-pane```
```key ^C```
-->
- Restart it:
```bash
kubectl logs deploy/pingpong --tail 1 --follow
```
<!--
```wait using pod/pingpong-```
```tmux last-pane```
-->
]
`kubectl logs` will warn us that multiple pods were found, and that it's showing us only one of them.
@@ -235,10 +250,30 @@ Let's leave `kubectl logs` running while we keep exploring.
watch kubectl get pods
```
<!--
```wait Every 2.0s```
```tmux split-pane -v```
-->
- Destroy the pod currently shown by `kubectl logs`:
```
kubectl delete pod pingpong-xxxxxxxxxx-yyyyy
```
<!--
```tmux select-pane -t 0```
```copy pingpong-[^-]*-.....```
```tmux last-pane```
```keys kubectl delete pod ```
```paste```
```key ^J```
```check```
```key ^D```
```tmux select-pane -t 1```
```key ^C```
```key ^D```
-->
]
---
@@ -307,7 +342,8 @@ Let's leave `kubectl logs` running while we keep exploring.
- Create the Cron Job:
```bash
kubectl run --schedule="*/3 * * * *" --restart=OnFailure --image=alpine sleep 10
kubectl run every3mins --schedule="*/3 * * * *" --restart=OnFailure \
--image=alpine sleep 10
```
- Check the resource that was created:
@@ -418,7 +454,7 @@ Let's leave `kubectl logs` running while we keep exploring.
<!--
```wait seq=```
```keys ^C```
```key ^C```
-->
]
@@ -447,6 +483,8 @@ class: extra-details
kubectl logs -l run=pingpong --tail 1 -f
```
<!-- ```wait error:``` -->
]
We see a message like the following one:

View File

@@ -12,9 +12,9 @@
<!--
```wait RESTARTS```
```keys ^C```
```key ^C```
```wait AVAILABLE```
```keys ^C```
```key ^C```
-->
- Now, create more `worker` replicas:

View File

@@ -97,6 +97,8 @@
ship init https://github.com/jpetazzo/kubercoins
```
<!-- ```wait Open browser``` -->
]
---
@@ -189,6 +191,11 @@
kubectl logs deploy/worker --tail=10 --follow --namespace=kustomcoins
```
<!--
```wait units of work done```
```key ^C```
-->
]
Note: it might take a minute or two for the worker to start.

View File

@@ -56,28 +56,6 @@
---
## Work in a separate namespace
- To avoid conflicts with existing resources, let's create and use a new namespace
.exercise[
- Create a new namespace:
```bash
kubectl create namespace orange
```
- Switch to that namespace:
```bash
kns orange
```
]
.warning[Make sure to call that namespace `orange`: it is hardcoded in the YAML files.]
---
## Deploying Consul
- We will use a slightly different YAML file
@@ -88,7 +66,9 @@
- the corresponding `volumeMounts` in the Pod spec
- the namespace `orange` used for discovery of Pods
- the label `consul` has been changed to `persistentconsul`
<br/>
(to avoid conflicts with the other Stateful Set)
.exercise[
@@ -117,7 +97,7 @@
kubectl get pv
```
- The Pod `consul-0` is not scheduled yet:
- The Pod `persistentconsul-0` is not scheduled yet:
```bash
kubectl get pods -o wide
```
@@ -132,9 +112,9 @@
- In a Stateful Set, the Pods are started one by one
- `consul-1` won't be created until `consul-0` is running
- `persistentconsul-1` won't be created until `persistentconsul-0` is running
- `consul-0` has a dependency on an unbound Persistent Volume Claim
- `persistentconsul-0` has a dependency on an unbound Persistent Volume Claim
- The scheduler won't schedule the Pod until the PVC is bound
@@ -172,7 +152,7 @@
- Once a PVC is bound, its pod can start normally
- Once the pod `consul-0` has started, `consul-1` can be created, etc.
- Once the pod `persistentconsul-0` has started, `persistentconsul-1` can be created, etc.
- Eventually, our Consul cluster is up, and backed by "persistent" volumes
@@ -180,7 +160,7 @@
- Check that our Consul cluster has 3 members indeed:
```bash
kubectl exec consul-0 consul members
kubectl exec persistentconsul-0 consul members
```
]

View File

@@ -84,14 +84,14 @@ Exactly what we need!
.exercise[
- View the logs for all the rng containers:
- View the logs for all the pingpong containers:
```bash
stern rng
stern pingpong
```
<!--
```wait HTTP/1.1```
```keys ^C```
```wait seq=```
```key ^C```
-->
]
@@ -117,7 +117,7 @@ Exactly what we need!
<!--
```wait weave-npc```
```keys ^C```
```key ^C```
-->
]
@@ -138,14 +138,14 @@ Exactly what we need!
.exercise[
- View the logs for all the things started with `kubectl create deployment`:
- View the logs for all the things started with `kubectl run`:
```bash
stern -l app
stern -l run
```
<!--
```wait units of work```
```keys ^C```
```wait seq=```
```key ^C```
-->
]

View File

@@ -120,6 +120,12 @@ This is our game plan:
kubectl create deployment testweb --image=nginx
```
<!--
```bash
kubectl wait deployment testweb --for condition=available
```
-->
- Find out the IP address of the pod with one of these two commands:
```bash
kubectl get pods -o wide -l app=testweb
@@ -154,6 +160,11 @@ The `curl` command should show us the "Welcome to nginx!" page.
curl $IP
```
<!--
```wait curl```
```key ^C```
-->
]
The `curl` command should now time out.

View File

@@ -121,7 +121,7 @@ Examples:
## One operator in action
- We will install the UPMC Enterprises ElasticSearch operator
- We will install [Elastic Cloud on Kubernetes](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html), an ElasticSearch operator
- This operator requires PersistentVolumes
@@ -206,51 +206,92 @@ Now, the StorageClass should have `(default)` next to its name.
## Install the ElasticSearch operator
- The operator needs:
- The operator provides:
- a Deployment for its controller
- a few CustomResourceDefinitions
- a Namespace for its other resources
- a ValidatingWebhookConfiguration for type checking
- a StatefulSet for its controller and webhook code
- a ServiceAccount, ClusterRole, ClusterRoleBinding for permissions
- a Namespace
- We have grouped all the definitions for these resources in a YAML file
- All these resources are grouped in a convenient YAML file
.exercise[
- Install the operator:
```bash
kubectl apply -f ~/container.training/k8s/elasticsearch-operator.yaml
kubectl apply -f ~/container.training/k8s/eck-operator.yaml
```
]
---
## Wait for the operator to be ready
## Check our new custom resources
- Some operators require to create their CRDs separately
- This operator will create its CRD itself
(i.e. the CRD is not listed in the YAML that we applied earlier)
- Let's see which CRDs were created
.exercise[
- Wait until the `elasticsearchclusters` CRD shows up:
- List all CRDs:
```bash
kubectl get crds
```
]
This operator supports ElasticSearch, but also Kibana and APM. Cool!
---
## Create the `eck-demo` namespace
- For clarity, we will create everything in a new namespace, `eck-demo`
- This namespace is hard-coded in the YAML files that we are going to use
- We need to create that namespace
.exercise[
- Create the `eck-demo` namespace:
```bash
kubectl create namespace eck-demo
```
- Switch to that namespace:
```bash
kns eck-demo
```
]
---
class: extra-details
## Can we use a different namespace?
Yes, but then we need to update all the YAML manifests that we
are going to apply in the next slides.
The `eck-demo` namespace is hard-coded in these YAML manifests.
Why?
Because when defining a ClusterRoleBinding that references a
ServiceAccount, we have to indicate in which namespace the
ServiceAccount is located.
---
## Create an ElasticSearch resource
- We can now create a resource with `kind: ElasticsearchCluster`
- We can now create a resource with `kind: Elasticsearch`
- The YAML for that resource will specify all the desired parameters:
- how many nodes do we want of each type (client, master, data)
- how many nodes we want
- image to use
- add-ons (kibana, cerebro, ...)
- whether to use TLS or not
@@ -260,7 +301,7 @@ Now, the StorageClass should have `(default)` next to its name.
- Create our ElasticSearch cluster:
```bash
kubectl apply -f ~/container.training/k8s/elasticsearch-cluster.yaml
kubectl apply -f ~/container.training/k8s/eck-elasticsearch.yaml
```
]
@@ -269,49 +310,88 @@ Now, the StorageClass should have `(default)` next to its name.
## Operator in action
- Over the next minutes, the operator will create:
- Over the next minutes, the operator will create our ES cluster
- StatefulSets (one for master nodes, one for data nodes)
- Deployments (for client nodes; and for add-ons like cerebro and kibana)
- Services (for all these pods)
- It will report our cluster status through the CRD
.exercise[
- Wait for all the StatefulSets to be fully up and running:
- Check the logs of the operator:
```bash
kubectl get statefulsets -w
stern --namespace=elastic-system operator
```
<!--
```wait elastic-operator-0```
```tmux split-pane -v```
-->
- Watch the status of the cluster through the CRD:
```bash
kubectl get es -w
```
<!--
```longwait green```
```key ^C```
```key ^D```
```key ^C```
-->
]
---
## Connecting to our cluster
- Since connecting directly to the ElasticSearch API is a bit raw,
<br/>we'll connect to the cerebro frontend instead
- It's not easy to use the ElasticSearch API from the shell
- But let's check at least if ElasticSearch is up!
.exercise[
- Edit the cerebro service to change its type from ClusterIP to NodePort:
- Get the ClusterIP of our ES instance:
```bash
kubectl patch svc cerebro-es -p "spec: { type: NodePort }"
kubectl get services
```
- Retrieve the NodePort that was allocated:
- Issue a request with `curl`:
```bash
kubectl get svc cerebro-es
curl http://`CLUSTERIP`:9200
```
- Connect to that port with a browser
]
We get an authentication error. Our cluster is protected!
---
## (Bonus) Setup filebeat
## Obtaining the credentials
- The operator creates a user named `elastic`
- It generates a random password and stores it in a Secret
.exercise[
- Extract the password:
```bash
kubectl get secret demo-es-elastic-user \
-o go-template="{{ .data.elastic | base64decode }} "
```
- Use it to connect to the API:
```bash
curl -u elastic:`PASSWORD` http://`CLUSTERIP`:9200
```
]
We should see a JSON payload with the `"You Know, for Search"` tagline.
---
## Sending data to the cluster
- Let's send some data to our brand new ElasticSearch cluster!
@@ -321,22 +401,170 @@ Now, the StorageClass should have `(default)` next to its name.
- Deploy filebeat:
```bash
kubectl apply -f ~/container.training/k8s/filebeat.yaml
kubectl apply -f ~/container.training/k8s/eck-filebeat.yaml
```
- Wait until some pods are up:
```bash
watch kubectl get pods -l k8s-app=filebeat
```
<!--
```wait Running```
```key ^C```
-->
- Check that a filebeat index was created:
```bash
curl -u elastic:`PASSWORD` http://`CLUSTERIP`:9200/_cat/indices
```
]
We should see at least one `filebeat` index in the output.
---
## Deploying an instance of Kibana
- Kibana can visualize the logs injected by filebeat
- The ECK operator can also manage Kibana
- Let's give it a try!
.exercise[
- Deploy a Kibana instance:
```bash
kubectl apply -f ~/container.training/k8s/eck-kibana.yaml
```
- Wait for it to be ready:
```bash
kubectl get kibana -w
```
<!--
```longwait green```
```key ^C```
-->
]
---
## (Bonus) Access log data with kibana
## Connecting to Kibana
- Let's expose kibana (by making kibana-es a NodePort too)
- Kibana is automatically set up to connect to ElasticSearch
- Then access kibana
(this is arranged by the YAML that we're using)
- We'll need to configure kibana indexes
- However, it will ask for authentication
- It's using the same user/password as ElasticSearch
.exercise[
- Get the NodePort allocated to Kibana:
```bash
kubectl get services
```
- Connect to it with a web browser
- Use the same user/password as before
]
---
## Setting up Kibana
After the Kibana UI loads, we need to click around a bit
.exercise[
- Pick "explore on my own"
- Click on "Use Elasticsearch data / Connect to your Elasticsearch index"
- Enter `filebeat-*` for the index pattern and click "Next step"
- Select `@timestamp` as time filter field name
- Click on "discover" (the small icon looking like a compass on the left bar)
- Play around!
]
---
## Scaling up the cluster
- At this point, we have only one node
- We are going to scale up
- But first, we'll deploy Cerebro, a UI for ElasticSearch
- This will let us see the state of the cluster, how indexes are sharded, etc.
---
## Deploying Cerebro
- Cerebro is stateless, so it's fairly easy to deploy
(one Deployment + one Service)
- However, it needs the address and credentials for ElasticSearch
- We prepared yet another manifest for that!
.exercise[
- Deploy Cerebro:
```bash
kubectl apply -f ~/container.training/k8s/eck-cerebro.yaml
```
- Lookup the NodePort number and connect to it:
```bash
kubectl get services
```
]
---
## Scaling up the cluster
- We can see on Cerebro that the cluster is "yellow"
(because our index is not replicated)
- Let's change that!
.exercise[
- Edit the ElasticSearch cluster manifest:
```bash
kubectl edit es demo
```
- Find the field `count: 1` and change it to 3
- Save and quit
<!--
```wait Please edit```
```keys /count:```
```key ^J```
```keys $r3:x```
```key ^J```
-->
]
---
@@ -376,13 +604,11 @@ We should see at least one index being created in cerebro.
- Look at the ElasticSearch resource definition
(`~/container.training/k8s/elasticsearch-cluster.yaml`)
(`~/container.training/k8s/eck-elasticsearch.yaml`)
- What should happen if we flip the `use-tls` flag? Twice?
- What should happen if we flip the TLS flag? Twice?
- What should happen if we remove / re-add the kibana or cerebro sections?
- What should happen if we change the number of nodes?
- What should happen if we add another group of nodes?
- What if we want different images or parameters for the different nodes?

View File

@@ -108,7 +108,7 @@ kubectl wait deploy/worker --for condition=available
<!--
```wait units of work done, updating hash counter```
```keys ^C```
```key ^C```
-->
]

View File

@@ -220,6 +220,8 @@
sudo vim /etc/kubernetes/manifests/kube-apiserver.yaml
```
<!-- ```wait apiVersion``` -->
]
---
@@ -240,6 +242,16 @@
- Save, quit
<!--
```keys /--enable-admission-plugins=```
```key ^J```
```key $```
```keys a,PodSecurityPolicy```
```key Escape```
```keys :wq```
```key ^J```
-->
]
---
@@ -271,6 +283,8 @@
kubectl run testpsp1 --image=nginx --restart=Never
```
<!-- ```wait forbidden: no providers available``` -->
- Try to create a Deployment:
```bash
kubectl run testpsp2 --image=nginx
@@ -498,3 +512,22 @@ class: extra-details
- bind `psp:restricted` to the group `system:authenticated`
- bind `psp:privileged` to the ServiceAccount `kube-system:default`
---
## Fixing the cluster
- Let's disable the PSP admission plugin
.exercise[
- Edit the Kubernetes API server static pod manifest
- Remove the PSP admission plugin
- This can be done with this one-liner:
```bash
sudo sed -i s/,PodSecurityPolicy// /etc/kubernetes/manifests/kube-apiserver.yaml
```
]

View File

@@ -197,7 +197,7 @@ If you want to use an external key/value store, add one of the following:
<!--
```longwait PX node status reports portworx service is healthy```
```keys ^C```
```key ^C```
-->
]
@@ -374,7 +374,7 @@ spec:
autopilot prompt detection expects $ or # at the beginning of the line.
```wait postgres@postgres```
```keys PS1="\u@\h:\w\n\$ "```
```keys ^J```
```key ^J```
-->
- Check that default databases have been created correctly:
@@ -428,7 +428,7 @@ autopilot prompt detection expects $ or # at the beginning of the line.
psql demo -c "select count(*) from pgbench_accounts"
```
<!-- ```keys ^D``` -->
<!-- ```key ^D``` -->
]
@@ -491,7 +491,7 @@ By "disrupt" we mean: "disconnect it from the network".
- Logout to go back on `node1`
<!-- ```keys ^D``` -->
<!-- ```key ^D``` -->
- Watch the events unfolding with `kubectl get events -w` and `kubectl get pods -w`
@@ -519,7 +519,7 @@ By "disrupt" we mean: "disconnect it from the network".
<!--
```wait postgres@postgres```
```keys PS1="\u@\h:\w\n\$ "```
```keys ^J```
```key ^J```
-->
- Check the number of rows in the `pgbench_accounts` table:
@@ -527,7 +527,7 @@ By "disrupt" we mean: "disconnect it from the network".
psql demo -c "select count(*) from pgbench_accounts"
```
<!-- ```keys ^D``` -->
<!-- ```key ^D``` -->
]

View File

@@ -204,32 +204,46 @@ We need to:
## Step 1: install Helm
- If we already installed Helm earlier, these commands won't break anything
- If we already installed Helm earlier, this command won't break anything
.exercice[
.exercise[
- Install Tiller (Helm's server-side component) on our cluster:
- Install the Helm CLI:
```bash
helm init
```
- Give Tiller permission to deploy things on our cluster:
```bash
kubectl create clusterrolebinding add-on-cluster-admin \
--clusterrole=cluster-admin --serviceaccount=kube-system:default
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 \
| bash
```
]
---
## Step 2: install Prometheus
## Step 2: add the `stable` repo
- Skip this if we already installed Prometheus earlier
- This will add the repository containing the chart for Prometheus
(in doubt, check with `helm list`)
- This command is idempotent
.exercice[
(it won't break anything if the repository was already added)
.exercise[
- Add the repository:
```bash
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
```
]
---
## Step 3: install Prometheus
- The following command, just like the previous ones, is idempotent
(it won't error out if Prometheus is already installed)
.exercise[
- Install Prometheus on our cluster:
```bash

View File

@@ -94,7 +94,7 @@
<!--
```wait NAME```
```keys ^C```
```key ^C```
-->
- Update `worker` either with `kubectl edit`, or by running:
@@ -150,7 +150,7 @@ That rollout should be pretty quick. What shows in the web UI?
<!--
```wait Waiting for deployment```
```keys ^C```
```key ^C```
-->
]
@@ -229,11 +229,7 @@ If you didn't deploy the Kubernetes dashboard earlier, just skip this slide.
.exercise[
<!--
```keys
^C
```
-->
<!-- ```key ^C``` -->
- Cancel the deployment and wait for the dust to settle:
```bash
@@ -336,7 +332,7 @@ We might see something like 1, 4, 5.
- Check the annotations for our replica sets:
```bash
kubectl describe replicasets -l app=worker | grep -A3
kubectl describe replicasets -l app=worker | grep -A3 ^Annotations
```
]

View File

@@ -19,17 +19,14 @@
.exercise[
- Open two new terminals to check what's going on with pods and deployments:
- Open a new terminal to keep an eye on our pods:
```bash
kubectl get pods -w
kubectl get deployments -w
```
<!--
```wait RESTARTS```
```keys ^C```
```wait AVAILABLE```
```keys ^C```
```tmux split-pane -h```
-->
- Now, create more `worker` replicas:
@@ -73,6 +70,11 @@ The graph in the web UI should go up again.
kubectl scale deployment worker --replicas=10
```
<!--
```key ^D```
```key ^C```
-->
]
--

View File

@@ -105,6 +105,11 @@
docker run ctr.run/github.com/jpetazzo/container.training/dockercoins/hasher
```
<!--
```longwait Sinatra```
```key ^C```
-->
]
There might be a long pause before the first layer is pulled,

View File

@@ -427,7 +427,7 @@ nodes and encryption of gossip traffic) were removed for simplicity.
<!--
```wait Synced node info```
```keys ^C```
```key ^C```
-->
- Check the health of the cluster:

View File

@@ -1,7 +1,7 @@
## Versions installed
- Kubernetes 1.15.3
- Docker Engine 19.03.1
- Kubernetes 1.17.1
- Docker Engine 19.03.5
- Docker Compose 1.24.1
<!-- ##VERSION## -->
@@ -23,6 +23,10 @@ class: extra-details
## Kubernetes and Docker compatibility
- Kubernetes 1.17 validates Docker Engine version [up to 19.03](https://github.com/kubernetes/kubernetes/pull/84476)
*however ...*
- Kubernetes 1.15 validates Docker Engine versions [up to 18.09](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.15.md#dependencies)
<br/>
(the latest version when Kubernetes 1.14 was released)
@@ -40,5 +44,47 @@ class: extra-details
- "Validates" = continuous integration builds with very extensive (and expensive) testing
- The Docker API is versioned, and offers strong backward-compatibility
<br/>
(if a client uses e.g. API v1.25, the Docker Engine will keep behaving the same way)
(If a client uses e.g. API v1.25, the Docker Engine will keep behaving the same way)
---
## Kubernetes versioning and cadence
- Kubernetes versions are expressed using *semantic versioning*
(a Kubernetes version is expressed as MAJOR.MINOR.PATCH)
- There is a new *patch* release whenever needed
(generally, there is about [2 to 4 weeks](https://github.com/kubernetes/sig-release/blob/master/release-engineering/role-handbooks/patch-release-team.md#release-timing) between patch releases,
except when a critical bug or vulnerability is found:
in that case, a patch release will follow as fast as possible)
- There is a new *minor* release approximately every 3 months
- At any given time, 3 *minor* releases are maintained
(in other words, a given *minor* release is maintained about 9 months)
---
## Kubernetes version compatibility
*Should my version of `kubectl` match exactly my cluster version?*
- `kubectl` can be up to one minor version older or newer than the cluster
(if cluster version is 1.15.X, `kubectl` can be 1.14.Y, 1.15.Y, or 1.16.Y)
- Things *might* work with larger version differences
(but they will probably fail randomly, so be careful)
- This is an example of an error indicating version compatibility issues:
```
error: SchemaError(io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus):
invalid object doesn't have additional properties
```
- Check [the documentation](https://kubernetes.io/docs/setup/release/version-skew-policy/#kubectl) for the whole story about compatibility

View File

@@ -110,6 +110,8 @@ It runs a single NGINX container.
kubectl create -f ~/container.training/k8s/nginx-1-without-volume.yaml
```
<!-- ```bash kubectl wait pod/nginx-without-volume --for condition=ready ``` -->
- Get its IP address:
```bash
IPADDR=$(kubectl get pod nginx-without-volume -o jsonpath={.status.podIP})
@@ -175,6 +177,8 @@ spec:
kubectl create -f ~/container.training/k8s/nginx-2-with-volume.yaml
```
<!-- ```bash kubectl wait pod/nginx-with-volume --for condition=ready ``` -->
- Get its IP address:
```bash
IPADDR=$(kubectl get pod nginx-with-volume -o jsonpath={.status.podIP})
@@ -269,6 +273,11 @@ spec:
kubectl get pods -o wide --watch
```
<!--
```wait NAME```
```tmux split-pane -v```
-->
]
---
@@ -282,11 +291,18 @@ spec:
kubectl create -f ~/container.training/k8s/nginx-3-with-git.yaml
```
<!--
```bash kubectl wait pod/nginx-with-git --for condition=initialized```
```bash IP=$(kubectl get pod nginx-with-git -o jsonpath={.status.podIP})```
-->
- As soon as we see its IP address, access it:
```bash
curl $IP
```
<!-- ```bash /bin/sleep 5``` -->
- A few seconds later, the state of the pod will change; access it again:
```bash
curl $IP
@@ -399,10 +415,19 @@ spec:
## Trying the init container
.exercise[
- Repeat the same operation as earlier
(try to send HTTP requests as soon as the pod comes up)
<!--
```key ^D```
```key ^C```
-->
]
- This time, instead of "403 Forbidden" we get a "connection refused"
- NGINX doesn't start until the git container has done its job

View File

@@ -89,6 +89,15 @@ def flatten(titles):
def generatefromyaml(manifest, filename):
manifest = yaml.safe_load(manifest)
if "zip" not in manifest:
if manifest["slides"].endswith('/'):
manifest["zip"] = manifest["slides"] + "slides.zip"
else:
manifest["zip"] = manifest["slides"] + "/slides.zip"
if "html" not in manifest:
manifest["html"] = filename + ".html"
markdown, titles = processchapter(manifest["chapters"], filename)
logging.debug("Found {} titles.".format(len(titles)))
toc = gentoc(titles)
@@ -117,6 +126,8 @@ def generatefromyaml(manifest, filename):
html = html.replace("@@CHAT@@", manifest["chat"])
html = html.replace("@@GITREPO@@", manifest["gitrepo"])
html = html.replace("@@SLIDES@@", manifest["slides"])
html = html.replace("@@ZIP@@", manifest["zip"])
html = html.replace("@@HTML@@", manifest["html"])
html = html.replace("@@TITLE@@", manifest["title"].replace("\n", " "))
html = html.replace("@@SLIDENUMBERPREFIX@@", manifest.get("slidenumberprefix", ""))
return html

View File

@@ -40,7 +40,7 @@ class: extra-details
<!--
```wait units of work done```
```keys ^C```
```key ^C```
-->
]
@@ -75,7 +75,7 @@ Tip: use `^S` and `^Q` to pause/resume log output.
```bash top```
```wait Tasks```
```keys ^C```
```key ^C```
-->
- run `vmstat 1` to see I/O usage (si/so/bi/bo)
@@ -85,7 +85,7 @@ Tip: use `^S` and `^Q` to pause/resume log output.
```bash vmstat 1```
```wait memory```
```keys ^C```
```key ^C```
-->
]

View File

@@ -72,6 +72,12 @@ Misattributed to Benjamin Franklin
- Slides will remain online so you can review them later if needed
- You can download the slides using that URL:
@@ZIP@@
(then open the file `@@HTML@@`)
---
class: in-person

View File

@@ -343,7 +343,7 @@ class: extra-details
- Stop the application by hitting `^C`
<!--
```keys ^C```
```key ^C```
-->
]

View File

@@ -267,7 +267,7 @@ class: extra-details
- Switch back to `node1` (with `exit`, `Ctrl-D` ...)
<!-- ```keys ^D``` -->
<!-- ```key ^D``` -->
- View the cluster from `node1`, which is a manager:
```bash

View File

@@ -72,7 +72,7 @@
```
<!-- ```wait User-Agent``` -->
<!-- ```keys ^C``` -->
<!-- ```key ^C``` -->
]

View File

@@ -158,7 +158,7 @@ class: elk-manual
```
<!-- ```wait "message" => "ok"``` -->
<!-- ```keys ^C``` -->
<!-- ```key ^C``` -->
]
@@ -266,7 +266,7 @@ The test message should show up in the logstash container logs.
```
<!-- ```wait Detected task failure``` -->
<!-- ```keys ^C``` -->
<!-- ```key ^C``` -->
]