Update Consul section

k8s/consul-1.yaml (new file, 77 lines added)

@@ -0,0 +1,77 @@
+# Basic Consul cluster using Cloud Auto-Join.
+# Caveats:
+# - no actual persistence
+# - scaling down to 1 will break the cluster
+# - pods may be colocated
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: consul
+rules:
+- apiGroups: [""]
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: consul
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: consul
+subjects:
+- kind: ServiceAccount
+  name: consul
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: consul
+spec:
+  ports:
+  - port: 8500
+    name: http
+  selector:
+    app: consul
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: consul
+spec:
+  serviceName: consul
+  replicas: 3
+  selector:
+    matchLabels:
+      app: consul
+  template:
+    metadata:
+      labels:
+        app: consul
+    spec:
+      serviceAccountName: consul
+      containers:
+      - name: consul
+        image: "consul:1.8"
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - "agent"
+        - "-bootstrap-expect=3"
+        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
+        - "-client=0.0.0.0"
+        - "-data-dir=/consul/data"
+        - "-server"
+        - "-ui"

k8s/consul-2.yaml

@@ -1,5 +1,9 @@
+# Better Consul cluster.
+# There is still no actual persistence, but:
+# - podAntiAffinity prevents pod colocation
+# - cluster works when scaling down to 1 (thanks to lifecycle hook)
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: Role
 metadata:
   name: consul
 rules:
@@ -11,17 +15,16 @@ rules:
   - list
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
+kind: RoleBinding
 metadata:
   name: consul
 roleRef:
   apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
+  kind: Role
   name: consul
 subjects:
 - kind: ServiceAccount
   name: consul
-  namespace: default
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -68,11 +71,16 @@ spec:
       terminationGracePeriodSeconds: 10
       containers:
       - name: consul
-        image: "consul:1.6"
+        image: "consul:1.8"
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
         args:
         - "agent"
         - "-bootstrap-expect=3"
-        - "-retry-join=provider=k8s label_selector=\"app=consul\""
+        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
         - "-client=0.0.0.0"
         - "-data-dir=/consul/data"
         - "-server"

k8s/consul-3.yaml (new file, 104 lines added)

@@ -0,0 +1,104 @@
+# Even better Consul cluster.
+# That one uses a volumeClaimTemplate to achieve true persistence.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: consul
+rules:
+- apiGroups: [""]
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: consul
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: consul
+subjects:
+- kind: ServiceAccount
+  name: consul
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: consul
+spec:
+  ports:
+  - port: 8500
+    name: http
+  selector:
+    app: consul
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: consul
+spec:
+  serviceName: consul
+  replicas: 3
+  selector:
+    matchLabels:
+      app: consul
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
+  template:
+    metadata:
+      labels:
+        app: consul
+    spec:
+      serviceAccountName: consul
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: app
+                operator: In
+                values:
+                - consul
+            topologyKey: kubernetes.io/hostname
+      terminationGracePeriodSeconds: 10
+      containers:
+      - name: consul
+        image: "consul:1.8"
+        volumeMounts:
+        - name: data
+          mountPath: /consul/data
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        args:
+        - "agent"
+        - "-bootstrap-expect=3"
+        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
+        - "-client=0.0.0.0"
+        - "-data-dir=/consul/data"
+        - "-server"
+        - "-ui"
+        lifecycle:
+          preStop:
+            exec:
+              command:
+              - /bin/sh
+              - -c
+              - consul leave
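
With the volumeClaimTemplate in place, each pod gets its own PersistentVolumeClaim. A minimal check (assuming a default StorageClass with dynamic provisioning):

```bash
# One PVC per pod, named <template>-<statefulset>-<ordinal>:
kubectl get pvc     # expect data-consul-0, data-consul-1, data-consul-2
kubectl get pv      # the dynamically provisioned volumes backing them

# Deleting a pod should no longer lose data: its replacement
# re-attaches the same volume.
kubectl delete pod consul-1
kubectl exec consul-0 -- consul members
```
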
@@ -58,7 +58,7 @@

 ## Deploying Consul

-- We will use a slightly different YAML file
+- Let's use a new manifest for our Consul cluster

 - The only differences between that file and the previous one are:

@@ -66,15 +66,11 @@

 - the corresponding `volumeMounts` in the Pod spec

-- the label `consul` has been changed to `persistentconsul`
-  <br/>
-  (to avoid conflicts with the other Stateful Set)
-
 .exercise[

 - Apply the persistent Consul YAML file:
   ```bash
-  kubectl apply -f ~/container.training/k8s/persistent-consul.yaml
+  kubectl apply -f ~/container.training/k8s/consul-3.yaml
   ```

 ]
@@ -97,7 +93,7 @@
   kubectl get pv
   ```

-- The Pod `persistentconsul-0` is not scheduled yet:
+- The Pod `consul-0` is not scheduled yet:
   ```bash
   kubectl get pods -o wide
   ```
@@ -112,9 +108,9 @@

 - In a Stateful Set, the Pods are started one by one

-- `persistentconsul-1` won't be created until `persistentconsul-0` is running
+- `consul-1` won't be created until `consul-0` is running

-- `persistentconsul-0` has a dependency on an unbound Persistent Volume Claim
+- `consul-0` has a dependency on an unbound Persistent Volume Claim

 - The scheduler won't schedule the Pod until the PVC is bound

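
A hedged way to observe that dependency while `consul-0` is Pending (PVC names follow the `<template>-<pod>` pattern, so the first claim is `data-consul-0`):

```bash
# The pod stays Pending until its claim is bound:
kubectl get pvc data-consul-0
kubectl describe pod consul-0   # events should mention unbound PersistentVolumeClaims
```
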
@@ -152,7 +148,7 @@

 - Once a PVC is bound, its pod can start normally

-- Once the pod `persistentconsul-0` has started, `persistentconsul-1` can be created, etc.
+- Once the pod `consul-0` has started, `consul-1` can be created, etc.

 - Eventually, our Consul cluster is up, and backed by "persistent" volumes

@@ -160,7 +156,7 @@

 - Check that our Consul cluster indeed has 3 members:
   ```bash
-  kubectl exec persistentconsul-0 -- consul members
+  kubectl exec consul-0 -- consul members
   ```

 ]
@@ -218,7 +218,9 @@ consul agent -data-dir=/consul/data -client=0.0.0.0 -server -ui \

 - Replace X.X.X.X and Y.Y.Y.Y with the addresses of other nodes

-- The same command-line can be used on all nodes (convenient!)
+- A node can add its own address (it will work fine)
+
+- ... Which means that we can use the same command-line on all nodes (convenient!)

 ---

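
For reference, the full command this slide discusses looks something like the sketch below (X.X.X.X and Y.Y.Y.Y are placeholders for the other nodes' addresses, as in the slide):

```bash
# The retry-join list may include the node's own address, so the
# same invocation works unchanged on every node:
consul agent -data-dir=/consul/data -client=0.0.0.0 -server -ui \
  -bootstrap-expect=3 \
  -retry-join=X.X.X.X \
  -retry-join=Y.Y.Y.Y
```
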
@@ -258,19 +260,13 @@

 ## Putting it all together

-- The file `k8s/consul.yaml` defines the required resources
+- The file `k8s/consul-1.yaml` defines the required resources

-  (service account, cluster role, cluster role binding, service, stateful set)
+  (service account, role, role binding, service, stateful set)

-- It has a few extra touches:
+- Inspired by this [excellent tutorial](https://github.com/kelseyhightower/consul-on-kubernetes) by Kelsey Hightower

-  - a `podAntiAffinity` prevents two pods from running on the same node
-
-  - a `preStop` hook makes the pod leave the cluster when shutdown gracefully
-
-This was inspired by this [excellent tutorial](https://github.com/kelseyhightower/consul-on-kubernetes) by Kelsey Hightower.
-Some features from the original tutorial (TLS authentication between
-nodes and encryption of gossip traffic) were removed for simplicity.
+  (many features from the original tutorial were removed for simplicity)

 ---

@@ -282,7 +278,7 @@ nodes and encryption of gossip traffic) were removed for simplicity.

 - Create the stateful set and associated service:
   ```bash
-  kubectl apply -f ~/container.training/k8s/consul.yaml
+  kubectl apply -f ~/container.training/k8s/consul-1.yaml
   ```

 - Check the logs as the pods come up one after another:
@@ -306,6 +302,88 @@

+## Caveats
+
+- The scheduler may place two Consul pods on the same node
+
+  - if that node fails, we lose two Consul pods at the same time
+  - this will cause the cluster to fail
+
+- Scaling down the cluster will cause it to fail
+
+  - when a Consul member leaves the cluster, it needs to inform the others
+  - otherwise, the last remaining node doesn't have quorum and stops functioning
+
+- This Consul cluster doesn't use real persistence yet
+
+  - data is stored in the containers' ephemeral filesystem
+  - if a pod fails, its replacement starts from a blank slate
+
+---
+
+## Improving pod placement
+
+- We need to tell the scheduler:
+
+  *do not put two of these pods on the same node!*
+
+- This is done with an `affinity` section like the following one:
+  ```yaml
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+          - key: app
+            operator: In
+            values:
+            - consul
+        topologyKey: kubernetes.io/hostname
+  ```
+
+---
+
+## Using a lifecycle hook
+
+- When a Consul member leaves the cluster, it needs to execute:
+  ```bash
+  consul leave
+  ```
+
+- This is done with a `lifecycle` section like the following one:
+  ```yaml
+  lifecycle:
+    preStop:
+      exec:
+        command:
+        - /bin/sh
+        - -c
+        - consul leave
+  ```
+
+---
+
+## Running a better Consul cluster
+
+- Let's try to add the scheduling constraint and lifecycle hook
+
+- We can do that in the same namespace or another one (as we like)
+
+- If we do that in the same namespace, we will see a rolling update
+
+  (pods will be replaced one by one)
+
+.exercise[
+
+- Deploy a better Consul cluster:
+  ```bash
+  kubectl apply -f ~/container.training/k8s/consul-2.yaml
+  ```
+
+]
+
+---
+
 ## Still no persistence, though

 - We aren't using actual persistence yet

   (no `volumeClaimTemplate`, Persistent Volume, etc.)
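
The `preStop` hook is what makes scale-down safe: each terminating pod runs `consul leave`, so the others are informed and keep quorum. A sketch of how one might verify that, reusing the commands from these slides:

```bash
# Scale the cluster down, then check membership from a surviving pod:
kubectl scale statefulset consul --replicas=2
kubectl exec consul-0 -- consul members   # the departed member should be gone, not "failed"
```
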