Add dynamic provisioning and PostgreSQL example
In this section, we set up Portworx to act as a dynamic provisioner. Then we use it to deploy a PostgreSQL Stateful Set. Finally, we simulate a node failure and observe the failover.
k8s/portworx.yaml (new file, 580 lines):
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true
apiVersion: v1
kind: ConfigMap
metadata:
  name: stork-config
  namespace: kube-system
data:
  policy.cfg: |-
    {
      "kind": "Policy",
      "apiVersion": "v1",
      "extenders": [
        {
          "urlPrefix": "http://stork-service.kube-system.svc:8099",
          "apiVersion": "v1beta1",
          "filterVerb": "filter",
          "prioritizeVerb": "prioritize",
          "weight": 5,
          "enableHttps": false,
          "nodeCacheCapable": false
        }
      ]
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: stork-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-role
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshots"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshotdatas"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "update"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
  resources: ["deployments", "deployments/extensions"]
  verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
  resources: ["statefulsets", "statefulsets/extensions"]
  verbs: ["list", "get", "watch", "patch", "update", "initialize"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-role-binding
subjects:
- kind: ServiceAccount
  name: stork-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-role
  apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
  name: stork-service
  namespace: kube-system
spec:
  selector:
    name: stork
  ports:
    - protocol: TCP
      port: 8099
      targetPort: 8099
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  labels:
    tier: control-plane
  name: stork
  namespace: kube-system
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  replicas: 3
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        name: stork
        tier: control-plane
    spec:
      containers:
      - command:
        - /stork
        - --driver=pxd
        - --verbose
        - --leader-elect=true
        - --health-monitor-interval=120
        imagePullPolicy: Always
        image: openstorage/stork:1.1.3
        resources:
          requests:
            cpu: '0.1'
        name: stork
      hostPID: false
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork
            topologyKey: "kubernetes.io/hostname"
      serviceAccountName: stork-account
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: stork-snapshot-sc
provisioner: stork-snapshot
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: stork-scheduler-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
- apiGroups: [""]
  resourceNames: ["kube-scheduler"]
  resources: ["endpoints"]
  verbs: ["delete", "get", "patch", "update"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete", "get", "list", "watch"]
- apiGroups: [""]
  resources: ["bindings", "pods/binding"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["pods/status"]
  verbs: ["patch", "update"]
- apiGroups: [""]
  resources: ["replicationcontrollers", "services"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["app", "extensions"]
  resources: ["replicasets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims", "persistentvolumes"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role-binding
subjects:
- kind: ServiceAccount
  name: stork-scheduler-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-scheduler-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
    name: stork-scheduler
  name: stork-scheduler
  namespace: kube-system
spec:
  replicas: 3
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        name: stork-scheduler
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-scheduler
        - --address=0.0.0.0
        - --leader-elect=true
        - --scheduler-name=stork
        - --policy-configmap=stork-config
        - --policy-configmap-namespace=kube-system
        - --lock-object-name=stork-scheduler
        image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10251
          initialDelaySeconds: 15
        name: stork-scheduler
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10251
        resources:
          requests:
            cpu: '0.1'
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork-scheduler
            topologyKey: "kubernetes.io/hostname"
      hostPID: false
      serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
  name: portworx-service
  namespace: kube-system
  labels:
    name: portworx
spec:
  selector:
    name: portworx
  ports:
    - name: px-api
      protocol: TCP
      port: 9001
      targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-get-put-list-role
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete", "get", "list"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims", "persistentvolumes"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
  resources: ["podsecuritypolicies"]
  resourceNames: ["privileged"]
  verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-role-binding
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: node-get-put-list-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
  name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role
  namespace: portworx
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role-binding
  namespace: portworx
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: portworx
  namespace: kube-system
  annotations:
    portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true"
spec:
  minReadySeconds: 0
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        name: portworx
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: px/enabled
                operator: NotIn
                values:
                - "false"
              - key: node-role.kubernetes.io/master
                operator: DoesNotExist
      hostNetwork: true
      hostPID: false
      containers:
        - name: portworx
          image: portworx/oci-monitor:1.4.2.2
          imagePullPolicy: Always
          args:
            ["-c", "px-workshop", "-s", "/dev/loop0", "-b",
             "-x", "kubernetes"]
          env:
            - name: "PX_TEMPLATE_VERSION"
              value: "v4"

          livenessProbe:
            periodSeconds: 30
            initialDelaySeconds: 840 # allow image pull in slow networks
            httpGet:
              host: 127.0.0.1
              path: /status
              port: 9001
          readinessProbe:
            periodSeconds: 10
            httpGet:
              host: 127.0.0.1
              path: /health
              port: 9015
          terminationMessagePath: "/tmp/px-termination-log"
          securityContext:
            privileged: true
          volumeMounts:
            - name: dockersock
              mountPath: /var/run/docker.sock
            - name: etcpwx
              mountPath: /etc/pwx
            - name: optpwx
              mountPath: /opt/pwx
            - name: proc1nsmount
              mountPath: /host_proc/1/ns
            - name: sysdmount
              mountPath: /etc/systemd/system
            - name: diagsdump
              mountPath: /var/cores
            - name: journalmount1
              mountPath: /var/run/log
              readOnly: true
            - name: journalmount2
              mountPath: /var/log
              readOnly: true
            - name: dbusmount
              mountPath: /var/run/dbus
      restartPolicy: Always
      serviceAccountName: px-account
      volumes:
        - name: dockersock
          hostPath:
            path: /var/run/docker.sock
        - name: etcpwx
          hostPath:
            path: /etc/pwx
        - name: optpwx
          hostPath:
            path: /opt/pwx
        - name: proc1nsmount
          hostPath:
            path: /proc/1/ns
        - name: sysdmount
          hostPath:
            path: /etc/systemd/system
        - name: diagsdump
          hostPath:
            path: /var/cores
        - name: journalmount1
          hostPath:
            path: /var/run/log
        - name: journalmount2
          hostPath:
            path: /var/log
        - name: dbusmount
          hostPath:
            path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-lh-account
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: px-lh-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-lh-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      nodePort: 32678
    - name: https
      port: 443
      nodePort: 32679
  selector:
    tier: px-web-console
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      tier: px-web-console
  replicas: 1
  template:
    metadata:
      labels:
        tier: px-web-console
    spec:
      initContainers:
      - name: config-init
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "init"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      containers:
      - name: px-lighthouse
        image: portworx/px-lighthouse:1.5.0
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        - containerPort: 443
        volumeMounts:
        - name: config
          mountPath: /config/lh
      - name: config-sync
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "sync"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      serviceAccountName: px-lh-account
      volumes:
      - name: config
        emptyDir: {}
k8s/postgres.yaml (new file, 30 lines):
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      schedulerName: stork
      containers:
      - name: postgres
        image: postgres:10.5
        volumeMounts:
        - mountPath: /var/lib/postgresql
          name: postgres
  volumeClaimTemplates:
  - metadata:
      name: postgres
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
k8s/storage-class.yaml (new file, 11 lines):
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: portworx-replicated
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "2"
  priority_io: "high"
slides/k8s/portworx.md (new file, 581 lines):
# Highly available Persistent Volumes

- How can we achieve true durability?

- How can we store data that would survive the loss of a node?

--

- We need to use Persistent Volumes backed by highly available storage systems

- There are many ways to achieve that:

  - leveraging our cloud's storage APIs

  - using NAS/SAN systems or file servers

  - distributed storage systems

--

- We are going to see one distributed storage system in action
---

## Our test scenario

- We will set up a distributed storage system on our cluster

- We will use it to deploy a SQL database (PostgreSQL)

- We will insert some test data in the database

- We will disrupt the node running the database

- We will see how it recovers
---

## Portworx

- Portworx is a *commercial* persistent storage solution for containers

- It works with Kubernetes, but also Mesos, Swarm ...

- It provides [hyper-converged](https://en.wikipedia.org/wiki/Hyper-converged_infrastructure) storage

  (=storage is provided by regular compute nodes)

- We're going to use it here because it can be deployed on any Kubernetes cluster

  (it doesn't require any particular infrastructure)

- We don't endorse or support Portworx in any particular way

  (but we appreciate that it's super easy to install!)
---

## Portworx requirements

- Kubernetes cluster ✔️

- Optional key/value store (etcd or Consul) ❌

- At least one available block device ❌
---

## The key-value store

- In the current version of Portworx (1.4) it is recommended to use etcd or Consul

- But Portworx also has beta support for an embedded key/value store

- For simplicity, we are going to use the latter option

  (but we could use the Consul service that we deployed earlier, too)
---

## One available block device

- Block device = disk or partition on a disk

- We can see block devices with `lsblk`

  (or `cat /proc/partitions` if we're old school like that!)

- If we don't have a spare disk or partition, we can use a *loop device*

- A loop device is a block device actually backed by a file

- These are frequently used to mount ISO (CD/DVD) images or VM disk images
---

## Setting up a loop device

- We are going to create a 10 GB (empty) file on each node

- Then make a loop device from it, to be used by Portworx

.exercise[

- Create a 10 GB file on each node:
  ```bash
  for N in $(seq 1 5); do ssh node$N sudo truncate --size 10G /portworx.blk; done
  ```

  (If SSH asks to confirm host keys, enter `yes` each time.)

- Associate the file to a loop device on each node:
  ```bash
  for N in $(seq 1 5); do ssh node$N sudo losetup /dev/loop0 /portworx.blk; done
  ```

]
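
If we want to double-check the result, `losetup --list` (a standard util-linux command) shows each loop device and its backing file; every node should report `/dev/loop0` backed by `/portworx.blk`:

```bash
for N in $(seq 1 5); do ssh node$N sudo losetup --list; done
```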
---

## Installing Portworx

- To install Portworx, we need to go to https://install.portworx.com/

- This website will ask us a bunch of questions about our cluster

- Then, it will generate a YAML file that we should apply to our cluster

--

- Or, we can just apply that YAML file directly (it's in `k8s/portworx.yaml`)

.exercise[

- Install Portworx:
  ```bash
  kubectl apply -f ~/container.training/k8s/portworx.yaml
  ```

]
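
If we want to watch the Portworx pods come up, the DaemonSet pods carry the `name=portworx` label (per `k8s/portworx.yaml`), so a plain label selector works; the first start can take a while, since the images are fairly big:

```bash
kubectl -n kube-system get pods -l name=portworx -w
```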
---

class: extra-details

## Generating a custom YAML file

If you want to generate a YAML file tailored to your own needs, the easiest
way is to use https://install.portworx.com/.

FYI, this is how we obtained the YAML file used earlier:
```
KBVER=$(kubectl version -o json | jq -r .serverVersion.gitVersion)
BLKDEV=/dev/loop0
curl "https://install.portworx.com/1.4/?kbver=$KBVER&b=true&s=$BLKDEV&c=px-workshop&stork=true&lh=true"
```
(The URL is quoted so that the shell doesn't interpret the `&` characters.)

If you want to use an external key/value store, add one of the following:
```
&k=etcd://`XXX`:2379
&k=consul://`XXX`:8500
```
... where `XXX` is the name or address of your etcd or Consul server.
---

## Dynamic provisioning of persistent volumes

- We are going to run PostgreSQL in a Stateful set

- The Stateful set will specify a `volumeClaimTemplate`

- That `volumeClaimTemplate` will create Persistent Volume Claims

- Kubernetes' [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) will satisfy these Persistent Volume Claims

  (by creating Persistent Volumes and binding them to the claims)

- The Persistent Volumes are then available for the PostgreSQL pods
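
Dynamic provisioning isn't specific to Stateful sets: any Persistent Volume Claim triggers it. As a sketch, a standalone claim like the one below (the name `test-pvc` is made up for illustration) would get a volume created and bound automatically, once a default Storage Class exists (see the next slides):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc   # hypothetical name, for illustration only
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi
```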
---

## Storage Classes

- It's possible that multiple storage systems are available

- Or, that a storage system offers multiple tiers of storage

  (SSD vs. magnetic; mirrored or not; etc.)

- We need to tell Kubernetes *which* system and tier to use

- This is achieved by creating a Storage Class

- A `volumeClaimTemplate` can indicate which Storage Class to use

- It is also possible to mark a Storage Class as "default"

  (it will be used if a `volumeClaimTemplate` doesn't specify one)
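
The "default" mark is just an annotation on the Storage Class object. Assuming a class named `portworx-replicated` (which we create on the next slides), it could also be set after the fact with:

```bash
kubectl annotate storageclass portworx-replicated \
        storageclass.kubernetes.io/is-default-class="true" --overwrite
```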
---

## Our default Storage Class

This is our Storage Class (in `k8s/storage-class.yaml`):

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: portworx-replicated
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "2"
  priority_io: "high"
```

- It says "use Portworx to create volumes"

- It tells Portworx to "keep 2 replicas of these volumes"

- It marks the Storage Class as being the default one
---

## Creating our Storage Class

- Let's apply that YAML file!

.exercise[

- Create the Storage Class:
  ```bash
  kubectl apply -f ~/container.training/k8s/storage-class.yaml
  ```

- Check that it is now available:
  ```bash
  kubectl get sc
  ```

]

It should show as `portworx-replicated (default)`.
---

## Our Postgres Stateful set

- The next slide shows `k8s/postgres.yaml`

- It defines a Stateful set

- With a `volumeClaimTemplate` requesting a 1 GB volume

- That volume will be mounted to `/var/lib/postgresql`

- There is another little detail: we enable the `stork` scheduler

- The `stork` scheduler is optional (it's specific to Portworx)

- It helps the Kubernetes scheduler to colocate the pod with its volume

  (see [this blog post](https://portworx.com/stork-storage-orchestration-kubernetes/) for more details about that)
---

.small[
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      schedulerName: stork
      containers:
      - name: postgres
        image: postgres:10.5
        volumeMounts:
        - mountPath: /var/lib/postgresql
          name: postgres
  volumeClaimTemplates:
  - metadata:
      name: postgres
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
```
]
---

## Creating the Stateful set

- Before applying the YAML, watch what's going on with `kubectl get events -w`

.exercise[

- Apply that YAML:
  ```bash
  kubectl apply -f ~/container.training/k8s/postgres.yaml
  ```

]
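
To see the dynamic provisioning happen, we can also list claims and volumes with plain `kubectl`; following the Stateful set naming convention (template name + pod name), the claim should be called `postgres-postgres-0`:

```bash
kubectl get pvc
kubectl get pv
```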
---

## Testing our PostgreSQL pod

- We will use `kubectl exec` to get a shell in the pod

- Good to know: we need to use the `postgres` user in the pod

.exercise[

- Get a shell in the pod, as the `postgres` user:
  ```bash
  kubectl exec -ti postgres-0 su postgres
  ```

- Check that default databases have been created correctly:
  ```bash
  psql -l
  ```

]

(This should show us 3 lines: postgres, template0, and template1.)
---

## Inserting data in PostgreSQL

- We will create a database and populate it with `pgbench`

.exercise[

- Create a database named `demo`:
  ```bash
  createdb demo
  ```

- Populate it with `pgbench`:
  ```bash
  pgbench -i -s 10 demo
  ```

]

- The `-i` flag means "create tables"

- The `-s 10` flag means "create 10 x 100,000 rows"
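
If we want to eyeball the generated data, a plain `psql` query works (the `pgbench_accounts` table is created by `pgbench -i`):

```bash
psql demo -c "select * from pgbench_accounts limit 3"
```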
---

## Checking how much data we have now

- The `pgbench` tool inserts rows in table `pgbench_accounts`

.exercise[

- Check that the `demo` database exists:
  ```bash
  psql -l
  ```

- Check how many rows we have in `pgbench_accounts`:
  ```bash
  psql demo -c "select count(*) from pgbench_accounts"
  ```

]

(We should see a count of 1,000,000 rows.)
---

## Find out which node is hosting the database

- We can find that information with `kubectl get pods -o wide`

.exercise[

- Check the node running the database:
  ```bash
  kubectl get pod postgres-0 -o wide
  ```

]

We are going to disrupt that node.

--

By "disrupt" we mean: "disconnect it from the network".
---

## Disconnect the node

- We will use `iptables` to block all traffic exiting the node

  (except SSH traffic, so we can repair the node later if needed)

.exercise[

- SSH to the node to disrupt:
  ```bash
  ssh `nodeX`
  ```

- Allow SSH traffic leaving the node, but block all other traffic:
  ```bash
  sudo iptables -I OUTPUT -p tcp --sport 22 -j ACCEPT
  sudo iptables -I OUTPUT 2 -j DROP
  ```

]
---

## Check that the node is disconnected

.exercise[

- Check that the node can't communicate with other nodes:
  ```bash
  ping -c 3 node1
  ```

- Logout to go back on `node1`

- Watch the events unfolding with `kubectl get events -w` and `kubectl get pods -w`

]

- It will take some time for Kubernetes to mark the node as unhealthy

- Then it will attempt to reschedule the pod to another node

- In about a minute, our pod should be up and running again
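
To watch the first step happen, we can also keep an eye on the node objects; the disconnected node should move from `Ready` to `NotReady`:

```bash
kubectl get nodes -w
```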
---

## Check that our data is still available

- We are going to reconnect to the (new) pod and check

.exercise[

- Get a shell on the pod:
  ```bash
  kubectl exec -ti postgres-0 su postgres
  ```

- Check the number of rows in the `pgbench_accounts` table:
  ```bash
  psql demo -c "select count(*) from pgbench_accounts"
  ```

]
---

## Double-check that the pod has really moved

- Just to make sure the system is not bluffing!

.exercise[

- Look on which node the pod is now running:
  ```bash
  kubectl get pod postgres-0 -o wide
  ```

]
---

## Re-enable the node

- Let's fix the node that we disconnected from the network

.exercise[

- SSH to the node:
  ```bash
  ssh `nodeX`
  ```

- Remove the iptables rule blocking traffic:
  ```bash
  sudo iptables -D OUTPUT 2
  ```

]
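
Back on `node1`, we can confirm that the node has rejoined the cluster (it should eventually show as `Ready` again):

```bash
kubectl get nodes
```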
---

class: extra-details

## A few words about this PostgreSQL setup

- In a real deployment, you would want to set a password

- This can be done by creating a `secret`:
  ```
  kubectl create secret generic postgres \
          --from-literal=password=$(base64 /dev/urandom | head -c16)
  ```

- And then passing that secret to the container:
  ```yaml
  env:
  - name: POSTGRES_PASSWORD
    valueFrom:
      secretKeyRef:
        name: postgres
        key: password
  ```
---

class: extra-details

## Troubleshooting Portworx

- If we need to see what's going on with Portworx:
  ```
  PXPOD=$(kubectl -n kube-system get pod -l name=portworx -o json |
          jq -r .items[0].metadata.name)
  kubectl -n kube-system exec $PXPOD -- /opt/pwx/bin/pxctl status
  ```

- We can also connect to Lighthouse (a web UI)

  - check the port with `kubectl -n kube-system get svc px-lighthouse`

  - connect to that port

  - the default login/password is `admin/Password1`

  - then specify `portworx-service` as the endpoint
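
If the NodePort is not reachable directly, a port-forward works too; here, local port 8080 is an arbitrary choice, forwarded to the service's HTTP port (80):

```bash
kubectl -n kube-system port-forward service/px-lighthouse 8080:80
```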
---

class: extra-details

## Removing Portworx

- Portworx provides a storage driver

- It needs to place itself "above" the Kubelet

  (it installs itself straight on the nodes)

- To remove it, we need to do more than just delete its Kubernetes resources

- It is done by applying a special label:
  ```
  kubectl label nodes --all px/enabled=remove --overwrite
  ```

- Then removing a bunch of local files:
  ```
  sudo chattr -i /etc/pwx/.private.json
  sudo rm -rf /etc/pwx /opt/pwx
  ```

  (on each node where Portworx was running)
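
If we used the loop device trick earlier, we may also want to detach it and delete the backing file on each node (`losetup -d` detaches a loop device):

```bash
sudo losetup -d /dev/loop0
sudo rm /portworx.blk
```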
The commit also fixes a typo in the existing slides:

@@ -64,7 +64,7 @@

   - traditional storage systems (NFS, iSCSI, FC...)

-  - distributed storage (Ceph, Clusterfs, Portworx...)
+  - distributed storage (Ceph, Glusterfs, Portworx...)

   - Using a persistent volume requires:
And in the chapters list, the "Persistence" placeholder notes are replaced by the new chapter:

@@ -13,16 +13,7 @@ chapters:
   - k8s/build-with-kaniko.md
   - k8s/configuration.md
   - k8s/statefulsets.md
-  - |
-    # Persistence
-
-    setting up portworx using consul
-
-    losetup trick
-
-    explain install/remove
-
-    in action with PostgreSQL
+  - k8s/portworx.md
   - k8s/authn-authz.md
   - |
     # Ingress